Compare commits

...

251 Commits

Author SHA1 Message Date
Sergey Kuznetsov
c1037785a1 Upgrade xrpl to 2.2.0 (#1443) 2024-06-04 16:02:28 +01:00
Sergey Kuznetsov
b1aafbaacb Backport compiler flags for gcc-12 2024-06-03 13:43:38 +01:00
Sergey Kuznetsov
965a2883fe Fix formatting 2024-06-03 13:11:24 +01:00
Sergey Kuznetsov
1e9eaee311 Fix formatting 2024-06-03 13:03:21 +01:00
Sergey Kuznetsov
a15ee2a8cc Updated xrpl to 2.2.0-rc3 2024-06-03 13:02:51 +01:00
Alex Kremer
26ed78fc05 Cherry-pick XRPFees bugfix 2024-03-20 18:15:57 +00:00
Sergey Kuznetsov
8575f786a8 Comment out macOS CI (#1164) 2024-02-07 15:57:50 +00:00
Alex Kremer
08b02c64cb Fix amm_info amounts in output map to user input (#1162)
Fixes #1156
2024-02-06 21:12:57 +00:00
Alex Kremer
b358649cf9 Add missing include (#1161)
Fixes #1160
2024-02-06 12:35:35 +00:00
github-actions[bot]
6bd72355db [CI] clang-tidy auto fixes (#1159)
Fixes #1158
2024-02-06 12:08:44 +00:00
Alex Kremer
a1699d7484 Rename headers to .hpp (#1154)
Fixes #1153
2024-02-05 13:10:50 +00:00
Sergey Kuznetsov
957aadd25a Requests library (#1140)
For #51.

First part of improving forwarding: a library for easy async requests.
2024-02-05 11:35:10 +00:00
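The requests library added in #1140 is not itself shown in this compare view; purely as an illustration, the sketch below shows the kind of plain Boost.Beast/Asio boilerplate such an "easy async requests" helper typically wraps (a synchronous GET for brevity; the host and names are made up and this is not Clio's actual API).

    #include <boost/asio/io_context.hpp>
    #include <boost/asio/ip/tcp.hpp>
    #include <boost/beast/core.hpp>
    #include <boost/beast/http.hpp>
    #include <iostream>

    namespace http = boost::beast::http;

    int main() {
        boost::asio::io_context ioc;
        boost::asio::ip::tcp::resolver resolver{ioc};
        boost::beast::tcp_stream stream{ioc};

        // Resolve and connect (hypothetical host, for illustration only)
        stream.connect(resolver.resolve("example.com", "80"));

        // Send a minimal GET request and read the response
        http::request<http::string_body> req{http::verb::get, "/", 11};
        req.set(http::field::host, "example.com");
        http::write(stream, req);

        boost::beast::flat_buffer buffer;
        http::response<http::string_body> res;
        http::read(stream, buffer, res);
        std::cout << res.result_int() << "\n";
    }
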
Alex Kremer
8f89a5913d Fix paging bug in range deletion tool (#1150) 2024-02-02 16:29:17 +00:00
github-actions[bot]
ecfe5e84e5 [CI] clang-tidy auto fixes (#1152)
Co-authored-by: kuznetsss <kuznetsss@users.noreply.github.com>
2024-02-02 09:34:08 +00:00
Alex Kremer
03c0940649 Fix most includes in headers (#1149)
Fixes #1146
2024-02-01 12:49:11 +00:00
cyan317
dc5aacfe39 Side chain ledgerentry (#1144)
Fix #861
2024-02-01 09:12:24 +00:00
Alex Kremer
3fda74e3f7 Cassandra data removal tool (#1142)
Fixes #1143
2024-01-30 13:27:42 +00:00
Santiago Reig
df27c4e629 Forward server_state to rippled (#1135)
Fixes #1138.
2024-01-25 15:56:31 +00:00
cyan317
37ee74c293 Fix bookbase (#1139)
Fix #1137
2024-01-25 14:01:01 +00:00
Bronek Kozicki
ec335176bb Fixes for gcc 13 (#1128)
* Add include <cstdint> where needed
* Add .devcontainer to .gitignore
* Document gcc-13 compilation fix for rocksdb
2024-01-25 12:01:11 +00:00
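For context on the first bullet above: gcc 13's libstdc++ stopped transitively including <cstdint> from several other standard headers, so code using fixed-width integer types without the explicit include no longer compiles. A minimal illustration (the variable names are made up):

    #include <cstdint>  // must be explicit on gcc 13; older toolchains often pulled it in transitively

    std::uint32_t ledgerSequence = 0;  // fails to compile on gcc 13 without <cstdint>
    std::int64_t dropsDelta = 0;
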
cyan317
ab33b26ec4 Fix ETL race condition problem (#1132)
Wait for the previous publish to finish before switching to writer.
2024-01-24 16:55:08 +00:00
cyan317
28c8fa2a9a Ledger entry type filter for account_objects and ledger_data (#1116)
Fix #1109
2024-01-17 17:29:59 +00:00
Alex Kremer
12bbed194c Rerun clang tidy on fix merge (#1124) 2024-01-16 18:14:09 +00:00
Alex Kremer
1fa09006f8 Trigger clang-tidy restart via git commands (#1123) 2024-01-16 16:44:35 +00:00
Alex Kremer
e3b6fc4bd4 [CI] clang-tidy auto fixes (manual) (#1122)
Fixes #1121
2024-01-16 16:13:52 +00:00
Alex Kremer
34594ff8c0 [CI] clang-tidy auto fixes (FAKE4) (#1120) 2024-01-16 14:49:28 +00:00
Alex Kremer
40eeb57920 Add permission for actions (#1119) 2024-01-16 14:44:04 +00:00
Alex Kremer
3eb36c049c [CI] clang-tidy auto fixes (FAKE3) (#1118) 2024-01-16 14:32:40 +00:00
Alex Kremer
81602e8ae7 Change to running workflow via gh (#1117) 2024-01-16 14:27:21 +00:00
Alex Kremer
0cef9e0620 [CI] clang-tidy auto fixes (FAKE2) (#1115) 2024-01-16 12:50:07 +00:00
Alex Kremer
81d1b30607 Use contains syntax to grep for title (#1114) 2024-01-16 12:46:02 +00:00
Alex Kremer
923d021c83 [CI] clang-tidy auto fixes (FAKE) (#1113) 2024-01-16 12:36:29 +00:00
Alex Kremer
cd2b09ffb7 Use contains syntax to grep for label (#1112) 2024-01-16 12:28:48 +00:00
github-actions[bot]
3c62a1f42c [CI] clang-tidy auto fixes (#1111)
Co-authored-by: kuznetsss <kuznetsss@users.noreply.github.com>
2024-01-16 09:19:57 +00:00
cyan317
f97e0690c8 Account tx type improvement (#1108)
Fix #1090
2024-01-16 09:18:47 +00:00
Alex Kremer
eeaccbabd9 Add attempt at auto rerun functionality (#1105)
Attempting to refactor clang-tidy as an action and reuse it from two workflows.
2024-01-15 19:48:59 +00:00
Alex Kremer
13d2d4e2ca Enable DB tests via ScyllaDB service (#1103)
Fixes #1092
2024-01-15 12:09:00 +00:00
cyan317
350a45e7e2 Fix unstable unittest (#1102)
Properly mock wsbase
2024-01-15 12:06:14 +00:00
Alex Kremer
ce86572274 Fix forwarded flag placement (#1101)
Fixes #1091
2024-01-12 14:02:50 +00:00
github-actions[bot]
ac97788db8 [CI] clang-tidy auto fixes (#1099)
Fixes #1098. Fixes #1100.

---------

Co-authored-by: kuznetsss <kuznetsss@users.noreply.github.com>
Co-authored-by: Sergey Kuznetsov <skuznetsov@ripple.com>
2024-01-12 12:29:51 +00:00
Alex Kremer
2893492569 Remove legacy hook (#1097) 2024-01-11 17:49:00 +00:00
Alex Kremer
b63e98bda0 Update libxrpl to 2.0.0 (#1096) 2024-01-11 16:36:39 +00:00
Alex Kremer
f4df5c2185 Implement amm_info handler (#1060)
Fixes #283
2024-01-11 15:57:53 +00:00
github-actions[bot]
93d5c12b14 [CI] clang-tidy auto fixes (#1094)
Co-authored-by: kuznetsss <kuznetsss@users.noreply.github.com>
2024-01-11 09:37:54 +00:00
cyan317
2514b7986e Fix unstable test (#1089) 2024-01-10 16:56:57 +00:00
cyan317
d30e63d49a add api_version to response (#1088)
Fix #1020
2024-01-09 15:53:09 +00:00
github-actions[bot]
61f1e0853d [CI] clang-tidy auto fixes (#1086)
Co-authored-by: kuznetsss <kuznetsss@users.noreply.github.com>
2024-01-09 09:35:42 +00:00
cyan317
eb1831c489 New subscription manager (#1071)
Fix #886
2024-01-08 14:45:57 +00:00
Shi Cheng
07bd4b0760 upload clio_server artifact (#1083) 2024-01-08 10:49:53 +00:00
Alex Kremer
e26a1e37b5 Improve batching code (#1079)
Fixes #1077
2024-01-05 15:44:30 +00:00
Sergey Kuznetsov
e89640bcfb Add debug cache to ci (#1078)
Fixes #1066
2024-01-05 10:59:26 +00:00
github-actions[bot]
ae135759ef [CI] clang-tidy auto fixes (#1081)
Co-authored-by: kuznetsss <kuznetsss@users.noreply.github.com>
2024-01-05 09:31:56 +00:00
Alex Kremer
28188aa0f9 Add batching to writes (#1076)
Fixes #1077
2024-01-04 15:17:15 +00:00
Sergey Kuznetsov
af485a0634 Add gcovr to CI docker image (#1072)
For #1066
2024-01-03 16:53:26 +00:00
github-actions[bot]
b609298870 [CI] clang-tidy auto fixes (#1070)
Co-authored-by: kuznetsss <kuznetsss@users.noreply.github.com>
2024-01-03 08:53:47 +00:00
Alex Kremer
d077093a8d Simplify backend mock access for unittests (#1062) 2024-01-02 13:35:57 +00:00
Alex Kremer
781f3b3c48 Bump libxrpl version to 2.0.0-rc6 (#1061)
Fixes #1063
2023-12-23 20:28:07 +00:00
Bronek Kozicki
a8bae96ad4 Add coverage_report target (#1058) 2023-12-21 15:08:32 +00:00
Bronek Kozicki
fe9649d872 Fix c++20 requires syntax (#1057) 2023-12-19 20:52:53 +00:00
Sergey Kuznetsov
431b5f5ab8 Add ccache mention in docs (#1055) 2023-12-18 16:43:15 +00:00
Sergey Kuznetsov
b1dc2775fb Remove exception text from error sending (#1048)
Fixes #1037
2023-12-13 16:30:16 +00:00
Elliot Lee
dd35a7cfd2 Update CONTRIBUTING.md (#1047) 2023-12-13 15:47:07 +00:00
github-actions[bot]
a9d685d5c0 [CI] clang-tidy auto fixes (#1046)
Fixes #1045.
2023-12-13 15:08:53 +00:00
Sergey Kuznetsov
6065d324b5 Remove push-to-fork in clang-tidy workflow 2023-12-13 14:23:21 +00:00
Sergey Kuznetsov
fe7b5fe18f Another try to sign commit in CI (#1043) 2023-12-13 13:54:28 +00:00
Sergey Kuznetsov
1c663988f5 Use different token to sign commits (#1041)
For #884
2023-12-13 13:23:24 +00:00
Sergey Kuznetsov
d11d566121 Fix wrong image (#1040)
For #884
2023-12-13 12:49:44 +00:00
Sergey Kuznetsov
a467cb2526 Add signing clang-tidy commit (#1036)
Fixes #884
2023-12-12 18:04:40 +00:00
Sergey Kuznetsov
f62e36dc94 Add status to readme (#1035)
For #844
2023-12-12 17:07:51 +00:00
Sergey Kuznetsov
d933ce2a29 Use clio_ci docker image (#1033)
Fixes #884
2023-12-12 16:03:08 +00:00
Sergey Kuznetsov
db751e3807 Make root default user in CI image (#1034)
For #884
2023-12-12 14:05:30 +00:00
Sergey Kuznetsov
3c4a8f0cfb Add conan setup into image (#1032)
For #884
2023-12-12 12:00:57 +00:00
Sergey Kuznetsov
397ce97175 Fix docker publish (#1027)
Fixes docker build for #884
2023-12-11 17:08:42 +00:00
Sergey Kuznetsov
ac6ad13f6c Fix release notes (#1022)
Fixes release notes for #884
2023-12-11 15:52:36 +00:00
Sergey Kuznetsov
7d1d1749bc Another fix of clang-tidy workflow (#1026)
Another fix for clang-tidy nightly check for #884
2023-12-11 15:11:30 +00:00
Sergey Kuznetsov
acf359d631 Fix permissions issue for clang-tidy (#1023)
Fixes issue creation for clang-tidy nightly checks for #884
2023-12-11 11:53:22 +00:00
Sergey Kuznetsov
a34e107b86 Add nightly builds (#1013)
Partially fixes #884.
Adds:
- Docker image for CI on Linux
- Nightly builds without cache and releases
- Nightly clang-tidy checks
- Fix typos in .clang-tidy
2023-12-08 18:22:22 +00:00
cyan317
b886586de3 Unify ledger_index type (#1019)
Fix #1014
2023-12-08 14:20:40 +00:00
cyan317
a57abb15a3 Fix example json format (#1018) 2023-12-05 12:45:01 +00:00
cyan317
c87586a265 Fix compiler error: header missing (#1016) 2023-12-04 13:45:48 +00:00
cyan317
8172670c93 Add close_time_iso to transaction stream (#1012)
Fix #1011
2023-11-30 13:32:50 +00:00
Sergey Kuznetsov
3fdcd3315b Make assert write to both log file and cerr (#1009) 2023-11-30 10:33:52 +00:00
cyan317
dd018f1c5e Fix ledger close_time_iso (#1008)
Fix #1007
2023-11-29 18:04:12 +00:00
Sergey Kuznetsov
c2b462da75 Fix paste on mac (#1006) 2023-11-29 15:41:45 +00:00
Sergey Kuznetsov
252920ec57 Fix CI 2023-11-29 15:24:50 +00:00
Sergey Kuznetsov
9ef6801c55 Fix git hook 2023-11-29 15:24:50 +00:00
Sergey Kuznetsov
24c562fa2a Add hostname resolving to dosguard (#1000)
Fixes #983.

Cassandra, ETL sources, and cache already support hostname resolving.

Also added config to make clangd show missing includes.
2023-11-29 15:13:40 +00:00
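A minimal sketch of what hostname resolving amounts to here, assuming Boost.Asio (which Clio already uses); this is illustrative only and not the actual dosguard code:

    #include <boost/asio/io_context.hpp>
    #include <boost/asio/ip/tcp.hpp>
    #include <iostream>

    int main() {
        boost::asio::io_context ioc;
        boost::asio::ip::tcp::resolver resolver{ioc};

        // Resolve a whitelist entry given as a hostname into concrete IP addresses
        boost::system::error_code ec;
        auto const results = resolver.resolve("example.com", "443", ec);
        if (ec) {
            std::cerr << "resolve failed: " << ec.message() << "\n";
            return 1;
        }
        for (auto const& entry : results)
            std::cout << entry.endpoint().address().to_string() << "\n";
    }
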
Sergey Kuznetsov
35f119a268 Switch to llvm 17 tools (#1002)
Fixes #952
2023-11-28 20:09:58 +00:00
Sergey Kuznetsov
1be368dcaf Fix wrong assert (#1003) 2023-11-28 14:06:17 +00:00
cyan317
a5fbb01299 fix (#999)
Fix #985
2023-11-24 16:01:27 +00:00
Sergey Kuznetsov
3b75d88a35 Add server_definitions to forwarding set (#996)
Fixes #942
2023-11-22 16:21:03 +00:00
cyan317
f0224581a5 Fix nfts_by_issuer's DB issue (#997)
Fix #988
2023-11-22 15:55:46 +00:00
Sergey Kuznetsov
b998473673 Add compression and histogram metric type for Prometheus (#987)
Fixes #932
Also fixes #966

Decided not to add the Summary type because it has the same functionality as Histogram but does more calculation on the client (Clio) side. See https://prometheus.io/docs/practices/histograms for a detailed comparison.
2023-11-22 12:55:06 +00:00
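Why a histogram is cheap on the Clio side: observing a value only bumps a bucket counter and a running sum, and quantiles are computed later from the exported buckets, whereas a Summary would have to compute quantiles in-process. A simplified sketch of the idea, not Clio's actual metrics code:

    #include <array>
    #include <cstddef>
    #include <cstdint>

    struct Histogram {
        // Bucket upper bounds chosen arbitrarily for the example
        std::array<double, 5> upperBounds{0.005, 0.05, 0.5, 5.0, 50.0};
        std::array<std::uint64_t, 6> bucketCounts{};  // last slot acts as the +Inf bucket
        double sum = 0.0;

        void observe(double value) {
            sum += value;
            std::size_t i = 0;
            while (i < upperBounds.size() && value > upperBounds[i])
                ++i;
            ++bucketCounts[i];  // cumulative counts for the exposition format can be derived at scrape time
        }
    };
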
Sergey Kuznetsov
8ebe2d6a80 Add assertion that terminates clio (#994)
Fixes #893.

Also added a termination handler that prints a backtrace on crash, which also fixes #929.
2023-11-21 13:06:04 +00:00
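The general technique, as a sketch only (assuming Boost.Stacktrace; not the exact handler from #994): install a terminate handler that prints a backtrace before aborting, so crashes leave something useful in the log.

    #include <boost/stacktrace.hpp>
    #include <cstdlib>
    #include <exception>
    #include <iostream>
    #include <stdexcept>

    int main() {
        std::set_terminate([]() {
            std::cerr << "terminate called; backtrace:\n" << boost::stacktrace::stacktrace() << '\n';
            std::abort();
        });

        throw std::runtime_error("unhandled exception");  // triggers the handler above
    }
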
Sergey Kuznetsov
3bab90ca7a Comment out gcc-only checks (#995) 2023-11-21 09:53:08 +00:00
cyan317
74660aebf1 binary (#993)
Fix #984
2023-11-20 17:53:34 +00:00
cyan317
db08de466a Unify json (#992)
Fix #962
2023-11-20 13:09:28 +00:00
Alex Kremer
1bacad9e49 Update xrpl version to 2.0.0-rc1 (#990)
Fixes #989
2023-11-15 19:40:38 +00:00
cyan317
ca16858878 Add DeliverMax for Tx streams (#980) 2023-11-13 13:29:36 +00:00
cyan317
feae85782c DeliverMax alias of Payment tx (#979)
Fix #973
2023-11-09 13:35:08 +00:00
cyan317
b016c1d7ba Fix lowercase ctid (#977)
Fix #963
2023-11-07 16:10:12 +00:00
Sergey Kuznetsov
0597a9d685 Add amm type to account objects (#975)
Fixes #834
2023-11-03 13:54:54 +00:00
cyan317
05bea6a971 add amm filter (#972)
Fix #968
2023-11-03 13:12:36 +00:00
cyan317
fa660ef400 Implement DID (#967)
Fix #918
2023-11-03 09:40:40 +00:00
Arihant Kothari
25d9e3cc36 Use .empty() instead of .size() for vectors (#971) 2023-11-02 23:02:00 +00:00
Sergey Kuznetsov
58f13e1660 Fix code inside assert (#969) 2023-11-02 21:34:03 +00:00
Sergey Kuznetsov
a16b680a7a Add prometheus support (#950)
Fixes #888
2023-11-02 17:26:03 +00:00
cyan317
320ebaa5d2 Move AdminVerificationStrategy to Server (#965) 2023-11-02 10:17:32 +00:00
Alex Kremer
058df4d12a Fix exit of ETL on exception (#964)
Fixes #708
2023-11-01 11:59:19 +00:00
Alex Kremer
5145d07693 Update conan to use xrpl 2.0.0-b4 (#961)
Fixes #960
2023-10-31 19:27:06 +00:00
cyan317
5e9e5f6f65 Admin password (#958)
Fix #922
2023-10-31 15:39:20 +00:00
Sergey Kuznetsov
1ce7bcbc28 Fix random source choosing (#959) 2023-10-31 15:04:15 +00:00
Shawn Xie
243858df12 nfts_by_issuer (#948)
Fixes issue #385

Original PR:
#584
2023-10-30 19:53:32 +00:00
Sergey Kuznetsov
b363cc93af Fix wrong random using (#955)
Fixes #855
2023-10-30 16:40:16 +00:00
Sergey Kuznetsov
200d97f0de Add AMM types to AccountTx filter (#954) 2023-10-30 16:36:28 +00:00
cyan317
1ec5d3e5a3 Amm ledgerentry (#951)
Fix #916
2023-10-30 15:23:47 +00:00
cyan317
e062121917 Add config to run without valid etl (#946)
Fix #943
2023-10-20 16:22:25 +01:00
Alex Kremer
1aab2b94b1 Move to clang-format-16 (#908)
Fixes #848
2023-10-19 16:55:04 +01:00
Sergey Kuznetsov
5de87b9ef8 Upgrade fmt to 10.1.1 (#937) 2023-10-18 17:11:31 +01:00
Sergey Kuznetsov
398db13f4d Add help part to readme (#938) 2023-10-18 17:10:51 +01:00
cyan317
5e8ffb66b4 Subscribe cleanup (#940)
Fix #939
2023-10-18 15:45:54 +01:00
cyan317
939740494b Fix dosguard max_connection (#927)
Fix #928
2023-10-13 13:10:33 +01:00
Alex Kremer
ff3d2b5600 Set libxrpl version to 2.0.0-b2 (#926)
Fixes #925
2023-10-13 12:38:39 +01:00
cyan317
7080b4d549 Fix messages pile up (#921)
Fix #924
2023-10-11 17:24:39 +01:00
cyan317
8d783ecd6a ctid for tx (#907)
Fix #898 and #917
2023-10-11 09:47:05 +01:00
Sergey Kuznetsov
5e6682ddc7 Add db usage counters (#912)
Fixes #911
2023-10-10 18:34:28 +01:00
Alex Kremer
fca29694a0 Fix http params handling discrepancy (#913)
Fixes #909
2023-10-10 12:23:40 +01:00
Sergey Kuznetsov
a541e6d00e Update gtest version (#900) 2023-10-09 15:36:28 +01:00
Alex Kremer
9bd38dd290 Update cassandra version (#844)
Fixes #843
2023-10-09 13:28:11 +01:00
Alex Kremer
f683b25f76 Add field name to output of invalidParams for OneOf (#906)
Fixes #901
2023-10-09 13:26:54 +01:00
cyan317
91ad1ffc3b Fix error "readability-else-after-return" (#905)
Fix compile error
2023-10-09 11:32:29 +01:00
cyan317
64b4a908da Fix account_tx response when both ledger range and ledger index/hash are specified (#904)
Fix mismatch with rippled
2023-10-09 10:19:07 +01:00
Alex Kremer
ac752c656e Change consume to full buffer recreate (#899) 2023-10-06 14:57:05 +01:00
Alex Kremer
4fe868aaeb Add inLedger to tx and account_tx (#895)
Fixes #890
2023-10-05 21:16:52 +01:00
cyan317
59eb40a1f2 Fix ledger_entry error code (#891)
Fix #896
2023-10-05 18:11:42 +01:00
Alex Kremer
0b5f667e4a Fixes broken counters for broken pipe connections (#880)
Fixes #885
2023-10-04 16:59:40 +01:00
cyan317
fa42c5c900 Fix transaction order of subscription transactions stream (#882)
Fix #833
2023-10-04 09:11:32 +01:00
Sergey Kuznetsov
0818b6ce5b Add admin password check (#847)
Fixes #846
2023-10-03 17:22:37 +01:00
cyan317
e2cc56d25a Add unittests for ledger publisher and bug fixes (#860)
Fix #881
2023-10-03 13:47:49 +01:00
Sergey Kuznetsov
caaa01bf0f Add tests for special characters in currency validator (#872)
Fixes #835

We are using xrpl's function to check that a currency code is valid, so there is no need to change our code.
I added more test cases to make sure that Clio supports the characters added in xrpl.
2023-10-03 12:13:17 +01:00
Sergey Kuznetsov
4b53bef1f5 Add clang tidy (#864)
Fixes #863
2023-10-03 10:43:54 +01:00
Sergey Kuznetsov
69f5025a29 Add compiler flags (#850)
Fixes #435
2023-10-02 16:45:48 +01:00
Sergey Kuznetsov
d1c41a8bb7 Don't use clio for conan cache hash (#879) 2023-10-02 11:43:51 +01:00
Sergey Kuznetsov
207ba51461 Fix CI (#878)
* Put conan-non-prod artifactory first

* Rebuild all conan packages if no cache

* Save cache only if there was no cache found
2023-09-28 16:49:15 +01:00
Sergey Kuznetsov
ebe7688ccb Api v1 bool support (#877)
* Allow non-bool values for signer_lists

* Allow transactions to be non-bool for v1

* Add tests for JsonBool
2023-09-28 12:56:38 +01:00
Sergey Kuznetsov
6d9f8a7ead CI improvements (#867)
* Generate conan profile in CI

* Move linux build into main workflow

* Add saving/restoring conan data

* Move cache to Linux

* Fix error

* Change key to hash from conanfile

* Fix path error

* Populate cache only in develop branch

* Big refactor

- Move duplicated code to actions
- Isolate mac build from home directory
- Separate ccache and conan caches

* Fix errors

* Change ccache cache name and fix errors

* Always populate cache

* Use newer ccache on Linux

* Strip tests

* Better conan hash
2023-09-28 11:36:03 +01:00
Sergey Kuznetsov
6ca777ea96 Account tx v1 api support (#874)
* Don't fail on ledger params for v1

* Different error on invalid ledger indexes for v1

* Allow forward and binary to be non-bool for v1

* Minor fixes

* Fix tests

* Don't fail if input ledger index is out of range for v1

* Restore deleted test

* Fix comparison of integers with different signedness

* Updated default api version in README and example config
2023-09-28 11:31:35 +01:00
cyan317
963685dd31 Ledger_entry return invalid parameter error for v1 (#873)
Fixes #875
2023-09-28 09:14:01 +01:00
cyan317
e36545058d Duplicate signer_lists in account_info (#870)
Fix #871
2023-09-25 13:24:16 +01:00
cyan317
44527140f0 Fix inaccurate coverage caused by LOG (#868)
Fix #845
2023-09-21 16:19:53 +01:00
Alex Kremer
0eaaa1fb31 Add workaround for async_compose (#841)
Fixes #840
2023-09-18 18:52:32 +01:00
Alex Kremer
1846f629a5 AccountTx filtering by transaction type (#851)
Fixes #685
2023-09-18 18:52:00 +01:00
Alex Kremer
83af5af3c6 Remove deprecated cassandra options (#852)
Fixes #849
2023-09-18 13:40:38 +01:00
Alex Kremer
418a0ddbf2 Add libxrpl version to server_info output (#854)
Fixes #853
2023-09-18 13:39:01 +01:00
Alex Kremer
6cfbfda014 Repeatedly log on amendment block (#829)
Fixes #364
2023-09-13 13:34:02 +01:00
Alex Kremer
91648f98ad Fix malformed taker error to match rippled (#827)
Fixes #352
2023-09-11 19:39:10 +01:00
Sergey Kuznetsov
71e1637c5f Add options for better clangd support (#836)
Fixes #839
2023-09-11 17:53:30 +01:00
Sergey Kuznetsov
59cd2ce5aa Fix missing lock (#837) 2023-09-11 16:19:57 +01:00
Alex Kremer
d783edd57a Add working dir to git command executions (#828) 2023-09-11 13:40:22 +01:00
cyan317
1ce8a58167 Add number of requests to log (#838) 2023-09-11 12:58:45 +01:00
Peter Chen
92e5c4792b Change boost::json to json in unittests (#831) 2023-09-11 12:39:38 +01:00
Michael Legleux
d7f36733bc Link libstd++ and gcc lib statically (#830) 2023-08-24 12:41:08 +01:00
Alex Kremer
435d56e7c5 Fix empty log lines (#825) 2023-08-16 22:57:52 +01:00
Alex Kremer
bf3b24867c Implement sanitizer support via CMake (#822)
Fixes #302
2023-08-15 15:20:50 +01:00
Alex Kremer
ec70127050 Add LOG macro to prevent unnecessary evaluations (#823)
Fixes #824
2023-08-15 14:36:11 +01:00
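The idea behind such a macro, as an illustrative sketch only (not Clio's actual LOG implementation): put the whole streaming statement behind the severity check, so the stream arguments are never evaluated when the level is disabled.

    #include <iostream>
    #include <string>

    enum class Severity { Debug, Info, Warning, Error };
    inline Severity gLogLevel = Severity::Info;  // made-up global for the example
    inline bool isEnabled(Severity s) { return s >= gLogLevel; }

    // The else-branch trick ties the following stream expression to the severity check,
    // so "LOG(sev) << expensive();" never calls expensive() when sev is disabled.
    #define LOG(sev)            \
        if (!isEnabled(sev)) {  \
        } else                  \
            std::cerr

    int main() {
        auto expensiveDump = [] { return std::string(4096, 'x'); };  // stand-in for a costly format call
        LOG(Severity::Debug) << "dump: " << expensiveDump() << '\n';  // skipped: Debug is below Info
        LOG(Severity::Error) << "this line is printed\n";
    }
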
Alex Kremer
547cb340bd Update doxygen comments (#818)
Fixes #421
2023-08-11 21:32:32 +01:00
Arihant Kothari
c20b14494a Add note on code coverage report generation (#821) 2023-08-11 18:38:35 +01:00
Peter Chen
696b1a585c Refactor namespaces part 2 (#820)
Part 2 of refactoring effort
2023-08-11 17:00:31 +01:00
Peter Chen
23442ff1a7 Refactor namespaces part 1 (#817)
Part 1 of refactoring effort
2023-08-10 18:05:13 +01:00
Alex Kremer
db4046e02a Move connection state logic to an earlier point (#816) 2023-08-09 16:42:33 +01:00
Peter Chen
fc1b5ae4da Support whitelisting for IPV4/IPV6 with CIDR (#796)
Fixes #244
2023-08-08 16:04:16 +01:00
Alex Kremer
5411fd7497 Update readme and compiler requirements (#815) 2023-08-08 11:32:18 +01:00
Michael Legleux
f6488f7024 Fix Linux/gcc build on CI (#813) 2023-08-07 20:53:20 +01:00
cyan317
e3ada6c5da Remove deprecated fields (#814)
Fix #801
2023-08-07 18:23:13 +01:00
cyan317
d61d702ccd Account_info add flags (#812)
Fixes #768
2023-08-04 16:22:39 +01:00
Alex Kremer
4d42cb3cdb Expose advanced options from cassandra-cpp-driver thru the config (#808)
Fixes #810
2023-08-03 15:49:56 +01:00
cyan317
111b55b397 Add cache test (#807)
Fixes #809
2023-08-03 15:03:17 +01:00
cyan317
c90bc15959 Return error when limit<=0 (#804)
Fix #806
2023-08-02 15:34:42 +01:00
Shawn Xie
1804e3e9c0 Update tmp build instructions in README (#802) 2023-08-02 13:45:04 +01:00
Alex Kremer
24f69acd9e Fix Linux/gcc compilation (#795)
Fixes #803
2023-08-02 13:44:03 +01:00
Alex Kremer
98d0a963dc Fix backend factory test and remove cout from base tests (#792)
Fixes #793
2023-07-27 15:40:50 +01:00
cyan317
665890d410 Fix connect_timeout request_timeout not work + tsan in RPCServerTestSuite (#790)
Fixes #791
2023-07-27 13:35:52 +01:00
John Freeman
545886561f Fix link to clio branch of rippled (#789) 2023-07-26 21:44:12 +01:00
Alex Kremer
68eec01dbc Fix TSAN issues part1 (#788)
Fixes a few issues from boost 1.82 migration and some Conan misconfigurations
2023-07-26 21:39:39 +01:00
Peter Chen
02621fe02e Add new RPC Handler "version" (#782)
Fixes #726
2023-07-26 20:02:11 +01:00
cyan317
6ad72446d1 Disable xrpl tests (#785) 2023-07-26 19:31:12 +01:00
Arihant Kothari
1d0a43669b Fix noRippleCheck fee field (#786)
Fixes #709
2023-07-26 19:22:17 +01:00
cyan317
71aabc8c29 Nftids (#780)
Fixes #769
2023-07-26 17:12:20 +01:00
cyan317
6b98579bfb Remove try/catch for server_info (#781)
Avoid TSAN false positive
2023-07-26 12:44:56 +01:00
cyan317
375ac2ffa6 Enable CI MACOS node (#783)
Fixes #784
2023-07-26 11:39:13 +01:00
Alex Kremer
c6ca650767 Add initial Conan integration (#712)
Fixes #645
2023-07-24 18:43:02 +01:00
cyan317
2336148d0d Fix missing "validated" (#778)
Fixes #779
2023-07-18 10:52:22 +01:00
Shawn Xie
12178abf4d Use mismatch in getNFTokenMintData (#774) 2023-07-17 22:09:15 +01:00
Alex Kremer
b8705ae086 Add time/uptime/amendment_blocked to server_info (#775) 2023-07-14 16:46:10 +01:00
cyan317
b83d7478ef Unsupported Error when request server stream (#772)
Fixes #773
2023-07-14 14:44:40 +01:00
cyan317
4fd6d51d21 Rename WsSession to WsBase (#770) 2023-07-14 13:28:15 +01:00
cyan317
d195bdb66d Change limit tests (#766)
Fixes #771
2023-07-14 13:08:08 +01:00
Alex Kremer
50dbb51627 Implement configuration options for useful cassandra driver opts (#765)
Fixes #764
2023-07-12 15:59:06 +01:00
cyan317
2f369e175c Add "network_id" to server_info (#761)
Fixes #763
2023-07-12 12:09:08 +01:00
cyan317
47e03a7da3 Forward not supported fields (#757)
Fixes #760
2023-07-11 16:49:16 +01:00
cyan317
d7b84a2e7a Missing "tx_hash" for transaction_entry (#758)
Fixes #759
2023-07-11 16:47:47 +01:00
cyan317
e79425bc21 Remove "strict" (#755)
Fixes #756
2023-07-11 13:21:56 +01:00
cyan317
7710468f37 Ledger owner fund (#753)
Fixes #754
2023-07-11 12:36:48 +01:00
Alex Kremer
210d7fdbc8 Use clamp modifier on limit field instead of between validator (#752)
Fixes #751
2023-07-10 17:57:26 +01:00
Alex Kremer
ba8e7188ca Implement the Clamp modifier (#740)
Fixes #750
2023-07-10 16:09:20 +01:00
cyan317
271323b0f4 account_object supports nft page (#736)
Fix #696
2023-07-10 13:42:57 +01:00
cyan317
7b306f3ba0 version2's account_info (#747)
Fixes #743
2023-07-10 13:42:09 +01:00
cyan317
73805d44ad account_tx rpcLGR_IDXS_INVALID adapt to v2 (#749)
Fixes #748
2023-07-10 13:41:11 +01:00
cyan317
f19772907d account_flags (#745)
Fixes #746
2023-07-10 13:01:33 +01:00
Alex Kremer
616f0176c9 Change an error code in account_lines to match rippled (#742)
Fixes #741
2023-07-07 16:50:32 +01:00
Alex Kremer
9f4f5d319e Fix discrepancies in ledger_entry (#739)
Fixes #738
2023-07-07 12:04:59 +01:00
cyan317
dcbc4577c2 Use "invalidParam" when "book_offers" taker format is wrong (#734)
Fix #735
2023-07-05 17:25:17 +01:00
Alex Kremer
f4d8e18bf7 Add deletion_blockers_only support (#737)
Fixes #730
2023-07-05 17:04:08 +01:00
cyan317
b3e001ebfb Remove date from account_tx (#732)
Fixes #733
2023-07-04 15:40:54 +01:00
Alex Kremer
524821c0b0 Add strict field support (#731)
Fixes #729
2023-07-04 15:39:34 +01:00
cyan317
a292a607c2 Implement 'type' for 'ledger_data' (#705)
Fixes #703
2023-07-04 15:26:21 +01:00
Alex Kremer
81894c0a90 Implement deposit_authorized RPC and tests (#728)
Fixes #727
2023-07-04 11:21:41 +01:00
Alex Kremer
0a7def18cd Implement custom HTTP errors (#720)
Fixes #697
2023-07-04 11:02:32 +01:00
cyan317
1e969ba13b Unknown Option (#710)
Fixes #711
2023-07-03 11:02:14 +01:00
cyan317
ef62718a27 Fix max limit for account tx (#723)
Fixes #724
2023-07-03 10:56:43 +01:00
Alex Kremer
aadd9e50f0 Forward api_version 1 requests to rippled (#716)
Fixes #698
2023-06-26 09:52:57 +01:00
cyan317
d9e89746a4 Stop etl when crash (#708)
Fixes #706
2023-06-21 13:10:24 +01:00
cyan317
557ea5d7f6 Remove sensitive info from log (#701)
Fixes #702
2023-06-16 16:50:54 +01:00
cyan317
4cc3b3ec0f Fix account_tx marker issue (#699)
Fixes #700
2023-06-16 12:29:34 +01:00
Alex Kremer
a960471ef4 Support api_version (#695)
Fixes #64
2023-06-16 12:14:30 +01:00
cyan317
871d43c85f Account_tx bug fix (#694)
Fixes #693
2023-06-14 14:57:42 +01:00
cyan317
5ce3fff788 Read only for backend (#691)
Fixes #663
2023-06-14 13:05:37 +01:00
cyan317
a76194d299 Fix clang crash (#690)
Fixes #692
2023-06-13 11:35:16 +01:00
Alex Kremer
14f9f98cf2 Implement basic transformer tests (#689) 2023-06-13 11:16:52 +01:00
cyan317
01e4eed130 Fix marker issue (#687)
Fixes #688
2023-06-12 10:44:42 +01:00
cyan317
893315c50d Fix mismatch when subscribing to book offers with both set to true
Fixes #677
2023-06-12 10:23:19 +01:00
Alex Kremer
b83d206ced Add more fine grained counters (#683)
Fixes #665
2023-06-09 16:48:55 +01:00
cyan317
9d28e64383 report not supported (#682)
Fixes #678 #679
2023-06-09 16:14:19 +01:00
cyan317
b873af2d43 Move the json check to RPC executor (#676)
Fixes #681
2023-06-09 11:35:15 +01:00
cyan317
435db339df Refactor web server (#667)
Fixes #674
2023-06-08 13:25:49 +01:00
cyan317
9836e4ceaf fix (#672)
Fix #673
2023-06-08 13:12:58 +01:00
Alex Kremer
5d2c079f1a Implement extractor tests (#671) 2023-06-07 12:33:46 +01:00
cyan317
244337c5b6 Fix (#669)
Fixes #670
2023-06-05 15:18:36 +01:00
Alex Kremer
b07fbb14dc Refactor ETL into smaller components (#649)
Fixes #666 and #664
2023-06-02 16:12:06 +01:00
Alex Kremer
7e8569b03a Remove old backend implementation (#662)
Fixes #661
2023-06-01 13:54:49 +01:00
cyan317
fc0c93b2ee hot wallet (#656)
Fix #654
2023-05-31 13:09:27 +01:00
cyan317
8ba7388d58 keep same as rippled (#655)
Fix #652
2023-05-31 13:08:31 +01:00
cyan317
0bbb539d0b update the default limit (#646)
Fixes #648
2023-05-25 11:39:08 +01:00
cyan317
c50174235f Fix (#647)
Fixes #609, #610, #611
2023-05-24 15:38:27 +01:00
cyan317
aace437245 Add the missing input check for "diff" field of "ledger" (#643)
Fixes #644
2023-05-17 19:02:55 +01:00
cyan317
2e3e8cd779 Fix mismatch error when owner is in wrong format (#641)
Fixes #642
2023-05-16 14:26:08 +01:00
Alex Kremer
6f93e1003e Address cppcheck issues (#640)
Fixes #639
2023-05-15 17:57:21 +01:00
cyan317
14978ca91d add nft_page (#637)
Fixes #638
2023-05-15 13:00:35 +01:00
Alex Kremer
9adcaeb21b Rename functions to camelCase (#636) 2023-05-15 11:38:48 +01:00
Alex Kremer
d94fe4e7ab Fix nft_history error to match rippled (#635)
Fixes #633
2023-05-15 11:28:24 +01:00
ledhed2222
c8029255ba fix nft uri (#634)
* fix nft uri

* remove some comments too
2023-05-14 04:15:45 -04:00
509 changed files with 65208 additions and 34017 deletions

.DS_Store (new binary file; content not shown)

.clang-format (modified)

@@ -1,7 +1,7 @@
---
Language: Cpp
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
AlignAfterOpenBracket: BlockIndent
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
@@ -18,20 +18,8 @@ AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
AfterClass: true
AfterControlStatement: true
AfterEnum: false
AfterFunction: true
AfterNamespace: false
AfterObjCDeclaration: true
AfterStruct: true
AfterUnion: true
BeforeCatch: true
BeforeElse: true
IndentBraces: false
BreakBeforeBinaryOperators: false
BreakBeforeBraces: Custom
BreakBeforeBraces: WebKit
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
@@ -43,13 +31,15 @@ Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ]
IncludeBlocks: Regroup
IncludeCategories:
- Regex: '^<(BeastConfig)'
Priority: 0
- Regex: '^<(ripple)/'
- Regex: '^".*"$'
Priority: 1
- Regex: '^<.*\.(h|hpp)>$'
Priority: 2
- Regex: '^<(boost)/'
- Regex: '^<.*>$'
Priority: 3
- Regex: '.*'
Priority: 4
@@ -58,6 +48,8 @@ IndentCaseLabels: true
IndentFunctionDeclarationAfterType: false
IndentWidth: 4
IndentWrappedFunctionNames: false
IndentRequiresClause: true
RequiresClausePosition: OwnLine
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
@@ -70,6 +62,7 @@ PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
QualifierAlignment: Right
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false

.clang-tidy (new file, 133 lines)

@@ -0,0 +1,133 @@
---
Checks: '-*,
bugprone-argument-comment,
bugprone-assert-side-effect,
bugprone-bad-signal-to-kill-thread,
bugprone-bool-pointer-implicit-conversion,
bugprone-copy-constructor-init,
bugprone-dangling-handle,
bugprone-dynamic-static-initializers,
bugprone-empty-catch,
bugprone-fold-init-type,
bugprone-forward-declaration-namespace,
bugprone-inaccurate-erase,
bugprone-incorrect-roundings,
bugprone-infinite-loop,
bugprone-integer-division,
bugprone-lambda-function-name,
bugprone-macro-parentheses,
bugprone-macro-repeated-side-effects,
bugprone-misplaced-operator-in-strlen-in-alloc,
bugprone-misplaced-pointer-arithmetic-in-alloc,
bugprone-misplaced-widening-cast,
bugprone-move-forwarding-reference,
bugprone-multiple-new-in-one-expression,
bugprone-multiple-statement-macro,
bugprone-no-escape,
bugprone-non-zero-enum-to-bool-conversion,
bugprone-parent-virtual-call,
bugprone-posix-return,
bugprone-redundant-branch-condition,
bugprone-reserved-identifier,
bugprone-unused-return-value,
bugprone-shared-ptr-array-mismatch,
bugprone-signal-handler,
bugprone-signed-char-misuse,
bugprone-sizeof-container,
bugprone-sizeof-expression,
bugprone-spuriously-wake-up-functions,
bugprone-standalone-empty,
bugprone-string-constructor,
bugprone-string-integer-assignment,
bugprone-string-literal-with-embedded-nul,
bugprone-stringview-nullptr,
bugprone-suspicious-enum-usage,
bugprone-suspicious-include,
bugprone-suspicious-memory-comparison,
bugprone-suspicious-memset-usage,
bugprone-suspicious-missing-comma,
bugprone-suspicious-realloc-usage,
bugprone-suspicious-semicolon,
bugprone-suspicious-string-compare,
bugprone-swapped-arguments,
bugprone-switch-missing-default-case,
bugprone-terminating-continue,
bugprone-throw-keyword-missing,
bugprone-too-small-loop-variable,
bugprone-undefined-memory-manipulation,
bugprone-undelegated-constructor,
bugprone-unhandled-exception-at-new,
bugprone-unhandled-self-assignment,
bugprone-unique-ptr-array-mismatch,
bugprone-unsafe-functions,
bugprone-unused-raii,
bugprone-use-after-move,
bugprone-virtual-near-miss,
cppcoreguidelines-init-variables,
cppcoreguidelines-misleading-capture-default-by-value,
cppcoreguidelines-pro-type-member-init,
cppcoreguidelines-pro-type-static-cast-downcast,
cppcoreguidelines-rvalue-reference-param-not-moved,
cppcoreguidelines-use-default-member-init,
cppcoreguidelines-virtual-class-destructor,
llvm-namespace-comment,
misc-const-correctness,
misc-definitions-in-headers,
misc-header-include-cycle,
misc-include-cleaner,
misc-misplaced-const,
misc-redundant-expression,
misc-static-assert,
misc-throw-by-value-catch-by-reference,
misc-unused-alias-decls,
misc-unused-using-decls,
modernize-concat-nested-namespaces,
modernize-deprecated-headers,
modernize-make-shared,
modernize-make-unique,
modernize-pass-by-value,
modernize-type-traits,
modernize-use-emplace,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-override,
modernize-use-using,
performance-faster-string-find,
performance-for-range-copy,
performance-implicit-conversion-in-loop,
performance-inefficient-vector-operation,
performance-move-const-arg,
performance-move-constructor-init,
performance-no-automatic-move,
performance-trivially-destructible,
readability-avoid-const-params-in-decls,
readability-braces-around-statements,
readability-const-return-type,
readability-container-contains,
readability-container-size-empty,
readability-convert-member-functions-to-static,
readability-duplicate-include,
readability-else-after-return,
readability-implicit-bool-conversion,
readability-inconsistent-declaration-parameter-name,
readability-make-member-function-const,
readability-misleading-indentation,
readability-non-const-parameter,
readability-redundant-declaration,
readability-redundant-member-init,
readability-redundant-string-init,
readability-simplify-boolean-expr,
readability-static-accessed-through-instance,
readability-static-definition-in-anonymous-namespace,
readability-suspicious-call-argument
'
CheckOptions:
readability-braces-around-statements.ShortStatementLines: 2
bugprone-unsafe-functions.ReportMoreUnsafeFunctions: true
bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc;::std::expected
misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*'
HeaderFilterRegex: '^.*/(src|unittests)/.*\.(h|hpp)$'
WarningsAsErrors: '*'

.clangd (new file, 5 lines)

@@ -0,0 +1,5 @@
Diagnostics:
UnusedIncludes: Strict
MissingIncludes: Strict
Includes:
IgnoreHeader: ".*/(detail|impl)/.*"

.codecov.yml (new file, 11 lines)

@@ -0,0 +1,11 @@
coverage:
status:
project:
default:
target: 50%
threshold: 2%
patch:
default:
target: 20% # Need to bump this number https://docs.codecov.com/docs/commit-status#patch-status
threshold: 2%

.githooks/ensure_release_tag (deleted)

@@ -1,20 +0,0 @@
#!/bin/bash
# Pushing a release branch requires an annotated tag at the released commit
branch=$(git rev-parse --abbrev-ref HEAD)
if [[ $branch =~ master ]]; then
# check if HEAD commit is tagged
if ! git describe --exact-match HEAD; then
echo "Commits to master must be tagged"
exit 1
fi
elif [[ $branch =~ release/* ]]; then
IFS=/ read -r branch rel_ver <<< ${branch}
tag=$(git describe --tags --abbrev=0)
if [[ "${rel_ver}" != "${tag}" ]]; then
echo "release/${rel_ver} branches must have annotated tag ${rel_ver}"
echo "git tag -am\"${rel_ver}\" ${rel_ver}"
exit 1
fi
fi

.githooks/pre-commit (modified)

@@ -4,10 +4,61 @@ exec 1>&2
# paths to check and re-format
sources="src unittests"
formatter="clang-format-11 -i"
formatter="clang-format -i"
version=$($formatter --version | grep -o '[0-9\.]*')
if [[ "17.0.0" > "$version" ]]; then
cat <<EOF
ERROR
-----------------------------------------------------------------------------
A minimum of version 17 of `which clang-format` is required.
Your version is $version.
Please fix paths and run again.
-----------------------------------------------------------------------------
EOF
exit 3
fi
# check there is no .h headers, only .hpp
wrong_headers=$(find $sources -name "*.h" | sed 's/^/ - /')
if [[ ! -z "$wrong_headers" ]]; then
cat <<EOF
ERROR
-----------------------------------------------------------------------------
Found .h headers in the source code. Please rename them to .hpp:
$wrong_headers
-----------------------------------------------------------------------------
EOF
exit 2
fi
function grep_code {
grep -l "${1}" ${sources} -r --include \*.hpp --include \*.cpp
}
if [[ "$OSTYPE" == "darwin"* ]]; then
# make all includes to be <...> style
grep_code '#include ".*"' | xargs sed -i '' -E 's|#include "(.*)"|#include <\1>|g'
# make local includes to be "..." style
main_src_dirs=$(find ./src -maxdepth 1 -type d -exec basename {} \; | tr '\n' '|' | sed 's/|$//' | sed 's/|/\\|/g')
grep_code "#include <\($main_src_dirs\)/.*>" | xargs sed -i '' -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g"
else
# make all includes to be <...> style
grep_code '#include ".*"' | xargs sed -i -E 's|#include "(.*)"|#include <\1>|g'
# make local includes to be "..." style
main_src_dirs=$(find ./src -type d -maxdepth 1 -exec basename {} \; | paste -sd '|' | sed 's/|/\\|/g')
grep_code "#include <\($main_src_dirs\)/.*>" | xargs sed -i -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g"
fi
first=$(git diff $sources)
find $sources -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
find $sources -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
second=$(git diff $sources)
changes=$(diff <(echo "$first") <(echo "$second") | wc -l | sed -e 's/^[[:space:]]*//')
@@ -24,4 +75,3 @@ EOF
exit 1
fi
.githooks/ensure_release_tag

.github/actions/build_clio/action.yml (new file, 18 lines)

@@ -0,0 +1,18 @@
name: Build clio
description: Build clio in build directory
inputs:
target:
description: Build target name
default: all
runs:
using: composite
steps:
- name: Get number of threads
uses: ./.github/actions/get_number_of_threads
id: number_of_threads
- name: Build Clio
shell: bash
run: |
cd build
cmake --build . --parallel ${{ steps.number_of_threads.outputs.threads_number }} --target ${{ inputs.target }}

.github/actions/clang_format/action.yml (new file, 36 lines)

@@ -0,0 +1,36 @@
name: Check format
description: Check format using clang-format-17
runs:
using: composite
steps:
- name: Add llvm repo
run: |
echo 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-17 main' | sudo tee -a /etc/apt/sources.list
wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
shell: bash
- name: Install packages
run: |
sudo apt update -qq
sudo apt install -y jq clang-format-17
sudo rm /usr/bin/clang-format
sudo ln -s /usr/bin/clang-format-17 /usr/bin/clang-format
shell: bash
- name: Run formatter
continue-on-error: true
id: run_formatter
run: |
./.githooks/pre-commit
shell: bash
- name: Check for differences
id: assert
shell: bash
run: |
git diff --color --exit-code | tee "clang-format.patch"
- name: Fail job
if: ${{ steps.run_formatter.outcome != 'success' }}
shell: bash
run: exit 1

.github/actions/code_coverage/action.yml (new file, 33 lines)

@@ -0,0 +1,33 @@
name: Generate code coverage report
description: Run tests, generate code coverage report and upload it to codecov.io
runs:
using: composite
steps:
- name: Run tests
shell: bash
run: |
build/clio_tests --backend_host=scylladb
- name: Run gcovr
shell: bash
run: |
gcovr -e unittests --xml build/coverage_report.xml -j8 --exclude-throw-branches
- name: Archive coverage report
uses: actions/upload-artifact@v3
with:
name: coverage-report.xml
path: build/coverage_report.xml
retention-days: 30
- name: Upload coverage report
uses: wandalen/wretry.action@v1.3.0
with:
action: codecov/codecov-action@v3
with: |
files: build/coverage_report.xml
fail_ci_if_error: true
verbose: true
token: ${{ env.CODECOV_TOKEN }}
attempt_limit: 5
attempt_delay: 10000

.github/actions/generate/action.yml (new file, 41 lines)

@@ -0,0 +1,41 @@
name: Run conan and cmake
description: Run conan and cmake
inputs:
conan_profile:
description: Conan profile name
required: true
conan_cache_hit:
description: Whether conan cache has been downloaded
required: true
default: 'false'
build_type:
description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
required: true
default: 'Release'
code_coverage:
description: Whether conan's coverage option should be on or not
required: true
default: 'false'
runs:
using: composite
steps:
- name: Create build directory
shell: bash
run: mkdir -p build
- name: Run conan
shell: bash
env:
BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
run: |
cd build
conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:tests=True -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}
- name: Run cmake
shell: bash
env:
BUILD_TYPE: "${{ inputs.build_type }}"
run: |
cd build
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} ${{ inputs.extra_cmake_args }} .. -G Ninja

.github/actions/get_number_of_threads/action.yml (new file, 26 lines)

@@ -0,0 +1,26 @@
name: Get number of threads
description: Determines number of threads to use on macOS and Linux
outputs:
threads_number:
description: Number of threads to use
value: ${{ steps.number_of_threads_export.outputs.num }}
runs:
using: composite
steps:
- name: Get number of threads on mac
id: mac_threads
if: ${{ runner.os == 'macOS' }}
shell: bash
run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT
- name: Get number of threads on Linux
id: linux_threads
if: ${{ runner.os == 'Linux' }}
shell: bash
run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT
- name: Export output variable
shell: bash
id: number_of_threads_export
run: |
echo "num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}" >> $GITHUB_OUTPUT

.github/actions/git_common_ancestor/action.yml (new file, 14 lines)

@@ -0,0 +1,14 @@
name: Git common ancestor
description: Find the closest common commit
outputs:
commit:
description: Hash of commit
value: ${{ steps.find_common_ancestor.outputs.commit }}
runs:
using: composite
steps:
- name: Find common git ancestor
id: find_common_ancestor
shell: bash
run: |
echo "commit=$(git merge-base --fork-point origin/develop)" >> $GITHUB_OUTPUT

.github/actions/lint/action.yml (deleted)

@@ -1,13 +0,0 @@
runs:
using: composite
steps:
# Github's ubuntu-20.04 image already has clang-format-11 installed
- run: |
find src unittests -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-11 -i
shell: bash
- name: Check for differences
id: assert
shell: bash
run: |
git diff --color --exit-code | tee "clang-format.patch"

.github/actions/prepare_runner/action.yml (new file, 47 lines)

@@ -0,0 +1,47 @@
name: Prepare runner
description: Install packages, set environment variables, create directories
inputs:
disable_ccache:
description: Whether ccache should be disabled
required: true
runs:
using: composite
steps:
- name: Install packages on mac
if: ${{ runner.os == 'macOS' }}
shell: bash
run: |
brew install llvm@14 pkg-config ninja bison cmake ccache jq gh
- name: Fix git permissions on Linux
if: ${{ runner.os == 'Linux' }}
shell: bash
run: git config --global --add safe.directory $PWD
- name: Set env variables for macOS
if: ${{ runner.os == 'macOS' }}
shell: bash
run: |
echo "CCACHE_DIR=${{ github.workspace }}/.ccache" >> $GITHUB_ENV
echo "CONAN_USER_HOME=${{ github.workspace }}" >> $GITHUB_ENV
- name: Set env variables for Linux
if: ${{ runner.os == 'Linux' }}
shell: bash
run: |
echo "CCACHE_DIR=/root/.ccache" >> $GITHUB_ENV
echo "CONAN_USER_HOME=/root/" >> $GITHUB_ENV
- name: Set CCACHE_DISABLE=1
if: ${{ inputs.disable_ccache == 'true' }}
shell: bash
run: |
echo "CCACHE_DISABLE=1" >> $GITHUB_ENV
- name: Create directories
shell: bash
run: |
mkdir -p $CCACHE_DIR
mkdir -p $CONAN_USER_HOME/.conan

.github/actions/restore_cache/action.yml (new file, 59 lines)

@@ -0,0 +1,59 @@
name: Restore cache
description: Find and restores conan and ccache cache
inputs:
conan_dir:
description: Path to .conan directory
required: true
ccache_dir:
description: Path to .ccache directory
required: true
build_type:
description: Current build type (e.g. Release, Debug)
required: true
default: Release
code_coverage:
description: Whether code coverage is on
required: true
default: 'false'
outputs:
conan_hash:
description: Hash to use as a part of conan cache key
value: ${{ steps.conan_hash.outputs.hash }}
conan_cache_hit:
description: True if conan cache has been downloaded
value: ${{ steps.conan_cache.outputs.cache-hit }}
ccache_cache_hit:
description: True if ccache cache has been downloaded
value: ${{ steps.ccache_cache.outputs.cache-hit }}
runs:
using: composite
steps:
- name: Find common commit
id: git_common_ancestor
uses: ./.github/actions/git_common_ancestor
- name: Calculate conan hash
id: conan_hash
shell: bash
run: |
conan info . -j info.json -o clio:tests=True
packages_info=$(cat info.json | jq '.[] | "\(.display_name): \(.id)"' | grep -v 'clio')
echo "$packages_info"
hash=$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)
rm info.json
echo "hash=$hash" >> $GITHUB_OUTPUT
- name: Restore conan cache
uses: actions/cache/restore@v3
id: conan_cache
with:
path: ${{ inputs.conan_dir }}/data
key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-develop-${{ steps.conan_hash.outputs.hash }}
- name: Restore ccache cache
uses: actions/cache/restore@v3
id: ccache_cache
if: ${{ env.CCACHE_DISABLE != '1' }}
with:
path: ${{ inputs.ccache_dir }}
key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-develop-${{ steps.git_common_ancestor.outputs.commit }}

.github/actions/save_cache/action.yml (new file, 56 lines)

@@ -0,0 +1,56 @@
name: Save cache
description: Save conan and ccache cache for develop branch
inputs:
conan_dir:
description: Path to .conan directory
required: true
conan_hash:
description: Hash to use as a part of conan cache key
required: true
conan_cache_hit:
description: Whether conan cache has been downloaded
required: true
ccache_dir:
description: Path to .ccache directory
required: true
ccache_cache_hit:
description: Whether conan cache has been downloaded
required: true
ccache_cache_miss_rate:
description: How many cache misses happened
build_type:
description: Current build type (e.g. Release, Debug)
required: true
default: Release
code_coverage:
description: Whether code coverage is on
required: true
default: 'false'
runs:
using: composite
steps:
- name: Find common commit
id: git_common_ancestor
uses: ./.github/actions/git_common_ancestor
- name: Cleanup conan directory from extra data
if: ${{ inputs.conan_cache_hit != 'true' }}
shell: bash
run: |
conan remove "*" -s -b -f
- name: Save conan cache
if: ${{ inputs.conan_cache_hit != 'true' }}
uses: actions/cache/save@v3
with:
path: ${{ inputs.conan_dir }}/data
key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-develop-${{ inputs.conan_hash }}
- name: Save ccache cache
if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
uses: actions/cache/save@v3
with:
path: ${{ inputs.ccache_dir }}
key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-develop-${{ steps.git_common_ancestor.outputs.commit }}

.github/actions/setup_conan/action.yml (new file, 52 lines)

@@ -0,0 +1,52 @@
name: Setup conan
description: Setup conan profile and artifactory
outputs:
conan_profile:
description: Created conan profile name
value: ${{ steps.conan_export_output.outputs.conan_profile }}
runs:
using: composite
steps:
- name: On mac
if: ${{ runner.os == 'macOS' }}
shell: bash
env:
CONAN_PROFILE: clio_clang_14
id: conan_setup_mac
run: |
echo "Creating $CONAN_PROFILE conan profile";
clang_path="$(brew --prefix llvm@14)/bin/clang"
clang_cxx_path="$(brew --prefix llvm@14)/bin/clang++"
conan profile new $CONAN_PROFILE --detect --force
conan profile update settings.compiler=clang $CONAN_PROFILE
conan profile update settings.compiler.version=14 $CONAN_PROFILE
conan profile update settings.compiler.cppstd=20 $CONAN_PROFILE
conan profile update "conf.tools.build:compiler_executables={\"c\": \"$clang_path\", \"cpp\": \"$clang_cxx_path\"}" $CONAN_PROFILE
conan profile update env.CC="$clang_path" $CONAN_PROFILE
conan profile update env.CXX="$clang_cxx_path" $CONAN_PROFILE
echo "created_conan_profile=$CONAN_PROFILE" >> $GITHUB_OUTPUT
- name: On linux
if: ${{ runner.os == 'Linux' }}
shell: bash
id: conan_setup_linux
run: |
echo "created_conan_profile=default" >> $GITHUB_OUTPUT
- name: Export output variable
shell: bash
id: conan_export_output
run: |
echo "conan_profile=${{ steps.conan_setup_mac.outputs.created_conan_profile || steps.conan_setup_linux.outputs.created_conan_profile }}" >> $GITHUB_OUTPUT
- name: Add conan-non-prod artifactory
shell: bash
run: |
if [[ -z $(conan remote list | grep conan-non-prod) ]]; then
echo "Adding conan-non-prod"
conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
else
echo "Conan-non-prod is available"
fi

Main build workflow in .github/workflows (modified)

@@ -1,212 +1,151 @@
name: Build Clio
name: Build
on:
push:
branches: [master, release/*, develop, develop-next]
branches: [master, release/*, develop]
pull_request:
branches: [master, release/*, develop, develop-next]
branches: [master, release/*, develop]
workflow_dispatch:
jobs:
lint:
name: Lint
name: Check format
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Run clang-format
uses: ./.github/actions/lint
uses: ./.github/actions/clang_format
build_clio:
name: Build Clio
runs-on: [self-hosted, heavy]
build:
name: Build
needs: lint
strategy:
fail-fast: false
matrix:
type:
- suffix: deb
image: rippleci/clio-dpkg-builder:2022-09-17
script: dpkg
- suffix: rpm
image: rippleci/clio-rpm-builder:2022-09-17
script: rpm
include:
- os: heavy
container:
image: rippleci/clio_ci:latest
build_type: Release
code_coverage: false
- os: heavy
container:
image: rippleci/clio_ci:latest
build_type: Debug
code_coverage: true
# - os: macOS
# build_type: Release
# code_coverage: false
runs-on: [self-hosted, "${{ matrix.os }}"]
container: ${{ matrix.container }}
services:
scylladb:
image: ${{ (matrix.code_coverage) && 'scylladb/scylla' || '' }}
options: >-
--health-cmd "cqlsh -e 'describe cluster'"
--health-interval 10s
--health-timeout 5s
--health-retries 5
container:
image: ${{ matrix.type.image }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
path: clio
fetch-depth: 0
- name: Clone Clio packaging repo
uses: actions/checkout@v3
- name: Prepare runner
uses: ./.github/actions/prepare_runner
with:
path: clio-packages
repository: XRPLF/clio-packages
ref: main
disable_ccache: false
- name: Build
- name: Setup conan
uses: ./.github/actions/setup_conan
id: conan
- name: Restore cache
uses: ./.github/actions/restore_cache
id: restore_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
ccache_dir: ${{ env.CCACHE_DIR }}
build_type: ${{ matrix.build_type }}
code_coverage: ${{ matrix.code_coverage }}
- name: Run conan and cmake
uses: ./.github/actions/generate
with:
conan_profile: ${{ steps.conan.outputs.conan_profile }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
build_type: ${{ matrix.build_type }}
code_coverage: ${{ matrix.code_coverage }}
- name: Build Clio
uses: ./.github/actions/build_clio
- name: Show ccache's statistics
shell: bash
id: ccache_stats
run: |
export CLIO_ROOT=$(realpath clio)
if [ ${{ matrix.type.suffix }} == "rpm" ]; then
source /opt/rh/devtoolset-11/enable
fi
cmake -S clio-packages -B clio-packages/build -DCLIO_ROOT=$CLIO_ROOT
cmake --build clio-packages/build --parallel $(nproc)
cp ./clio-packages/build/clio-prefix/src/clio-build/clio_tests .
mv ./clio-packages/build/*.${{ matrix.type.suffix }} .
- name: Artifact packages
ccache -s > /tmp/ccache.stats
miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
cat /tmp/ccache.stats
- name: Strip tests
if: ${{ !matrix.code_coverage }}
run: strip build/clio_tests
- name: Upload clio_server
uses: actions/upload-artifact@v3
with:
name: clio_${{ matrix.type.suffix }}_packages
path: ${{ github.workspace }}/*.${{ matrix.type.suffix }}
name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
path: build/clio_server
- name: Artifact clio_tests
- name: Upload clio_tests
if: ${{ !matrix.code_coverage }}
uses: actions/upload-artifact@v3
with:
name: clio_tests-${{ matrix.type.suffix }}
path: ${{ github.workspace }}/clio_tests
name: clio_tests_${{ runner.os }}
path: build/clio_tests
build_dev:
name: Build on Mac/Clang14 and run tests
needs: lint
continue-on-error: false
runs-on: [self-hosted, macOS]
steps:
- uses: actions/checkout@v3
- name: Save cache
uses: ./.github/actions/save_cache
with:
path: clio
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
ccache_dir: ${{ env.CCACHE_DIR }}
ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
build_type: ${{ matrix.build_type }}
code_coverage: ${{ matrix.code_coverage }}
- name: Check Boost cache
id: boost
uses: actions/cache@v3
with:
path: boost_1_77_0
key: ${{ runner.os }}-boost
# TODO: This is not a part of build process but it is the easiest way to do it here.
# It will be refactored in https://github.com/XRPLF/clio/issues/1075
- name: Run code coverage
if: ${{ matrix.code_coverage }}
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
uses: ./.github/actions/code_coverage
- name: Build Boost
if: ${{ steps.boost.outputs.cache-hit != 'true' }}
run: |
rm -rf boost_1_77_0.tar.gz boost_1_77_0 # cleanup if needed first
curl -s -fOJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
tar zxf boost_1_77_0.tar.gz
cd boost_1_77_0
./bootstrap.sh
./b2 define=BOOST_ASIO_HAS_STD_INVOKE_RESULT cxxflags="-std=c++20"
- name: Install dependencies
run: |
brew install llvm@14 pkg-config protobuf openssl ninja cassandra-cpp-driver bison cmake
- name: Setup environment for llvm-14
run: |
export PATH="/usr/local/opt/llvm@14/bin:$PATH"
export LDFLAGS="-L/usr/local/opt/llvm@14/lib -L/usr/local/opt/llvm@14/lib/c++ -Wl,-rpath,/usr/local/opt/llvm@14/lib/c++"
export CPPFLAGS="-I/usr/local/opt/llvm@14/include"
- name: Build clio
run: |
export BOOST_ROOT=$(pwd)/boost_1_77_0
cd clio
cmake -B build -DCMAKE_C_COMPILER='/usr/local/opt/llvm@14/bin/clang' -DCMAKE_CXX_COMPILER='/usr/local/opt/llvm@14/bin/clang++'
if ! cmake --build build -j; then
echo '# 🔥🔥 MacOS AppleClang build failed!💥' >> $GITHUB_STEP_SUMMARY
exit 1
fi
- name: Run Test
run: |
cd clio/build
./clio_tests --gtest_filter="-BackendTest*:BackendCassandraBaseTest*:BackendCassandraTest*"
test_clio:
name: Test Clio
runs-on: [self-hosted, Linux]
needs: build_clio
test:
name: Run Tests
needs: build
strategy:
fail-fast: false
matrix:
suffix: [rpm, deb]
steps:
- uses: actions/checkout@v3
- name: Get clio_tests artifact
uses: actions/download-artifact@v3
with:
name: clio_tests-${{ matrix.suffix }}
- name: Run tests
timeout-minutes: 10
uses: ./.github/actions/test
code_coverage:
name: Build on Linux and code coverage
needs: lint
continue-on-error: false
runs-on: ubuntu-22.04
include:
- os: heavy
container:
image: rippleci/clio_ci:latest
# - os: macOS
runs-on: [self-hosted, "${{ matrix.os }}"]
container: ${{ matrix.container }}
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
path: clio
- name: Check Boost cache
id: boost
uses: actions/cache@v3
with:
path: boost
key: ${{ runner.os }}-boost
- name: Build boost
if: steps.boost.outputs.cache-hit != 'true'
name: clio_tests_${{ runner.os }}
- name: Run clio_tests
run: |
curl -s -OJL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.gz"
tar zxf boost_1_77_0.tar.gz
mv boost_1_77_0 boost
cd boost
./bootstrap.sh
./b2
- name: install deps
run: |
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential doxygen bison flex autoconf clang-format gcovr
- name: Build clio
run: |
export BOOST_ROOT=$(pwd)/boost
cd clio
cmake -B build -DCODE_COVERAGE=on -DTEST_PARAMETER='--gtest_filter="-BackendTest*:BackendCassandraBaseTest*:BackendCassandraTest*"'
if ! cmake --build build -j$(nproc); then
echo '# 🔥Ubuntu build🔥 failed!💥' >> $GITHUB_STEP_SUMMARY
exit 1
fi
cd build
make clio_tests-ccov
- name: Code Coverage Summary Report
uses: irongut/CodeCoverageSummary@v1.2.0
with:
filename: clio/build/clio_tests-gcc-cov/out.xml
badge: true
output: both
format: markdown
- name: Save PR number and ccov report
run: |
mkdir -p ./UnitTestCoverage
echo ${{ github.event.number }} > ./UnitTestCoverage/NR
cp clio/build/clio_tests-gcc-cov/report.html ./UnitTestCoverage/report.html
cp code-coverage-results.md ./UnitTestCoverage/out.md
cat code-coverage-results.md > $GITHUB_STEP_SUMMARY
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
with:
files: clio/build/clio_tests-gcc-cov/out.xml
- uses: actions/upload-artifact@v3
with:
name: UnitTestCoverage
path: UnitTestCoverage/
- uses: actions/upload-artifact@v3
with:
name: code_coverage_report
path: clio/build/clio_tests-gcc-cov/out.xml
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"

.github/workflows/clang-tidy.yml (new file, 116 lines)

@@ -0,0 +1,116 @@
name: Clang-tidy check
on:
schedule:
- cron: "0 6 * * 1-5"
workflow_dispatch:
pull_request:
branches: [develop]
paths:
- .clang_tidy
- .github/workflows/clang-tidy.yml
workflow_call:
jobs:
clang_tidy:
runs-on: [self-hosted, Linux]
container:
image: rippleci/clio_ci:latest
permissions:
contents: write
issues: write
pull-requests: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Prepare runner
uses: ./.github/actions/prepare_runner
with:
disable_ccache: true
- name: Setup conan
uses: ./.github/actions/setup_conan
id: conan
- name: Restore cache
uses: ./.github/actions/restore_cache
id: restore_cache
with:
conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
ccache_dir: ${{ env.CCACHE_DIR }}
- name: Run conan and cmake
uses: ./.github/actions/generate
with:
conan_profile: ${{ steps.conan.outputs.conan_profile }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
build_type: Release
- name: Get number of threads
uses: ./.github/actions/get_number_of_threads
id: number_of_threads
- name: Run clang-tidy
continue-on-error: true
shell: bash
id: run_clang_tidy
run: |
run-clang-tidy-17 -p build -j ${{ steps.number_of_threads.outputs.threads_number }} -fix -quiet 1>output.txt
- name: Run pre-commit hook
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
continue-on-error: true
shell: bash
run: ./.githooks/pre-commit
- name: Print issues found
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
shell: bash
run: |
sed -i '/error\||/!d' ./output.txt
cat output.txt
rm output.txt
- name: Create an issue
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
id: create_issue
shell: bash
env:
GH_TOKEN: ${{ github.token }}
run: |
echo -e 'Clang-tidy found issues in the code:\n' > issue.md
echo -e "List of the issues found: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/" >> issue.md
gh issue create --assignee 'cindyyan317,godexsoft,kuznetsss' --label bug --title 'Clang-tidy found bugs in code🐛' --body-file ./issue.md > create_issue.log
created_issue=$(cat create_issue.log | sed 's|.*/||')
echo "created_issue=$created_issue" >> $GITHUB_OUTPUT
rm create_issue.log issue.md
- uses: crazy-max/ghaction-import-gpg@v5
with:
gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
git_user_signingkey: true
git_commit_gpgsign: true
- name: Create PR with fixes
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
uses: peter-evans/create-pull-request@v5
env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}
with:
commit-message: "[CI] clang-tidy auto fixes"
committer: Clio CI <skuznetsov@ripple.com>
branch: "clang_tidy/autofix"
branch-suffix: timestamp
delete-branch: true
title: "[CI] clang-tidy auto fixes"
body: "Fixes #${{ steps.create_issue.outputs.created_issue }}. Please review and commit clang-tidy fixes."
reviewers: "cindyyan317,godexsoft,kuznetsss"
- name: Fail the job
if: ${{ steps.run_clang_tidy.outcome != 'success' }}
shell: bash
run: exit 1
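For reference, the fix step above can be reproduced locally; a minimal sketch, assuming the project has already been configured into ./build with a compile_commands.json and clang-tidy-17 is on the PATH:
  cmake -B build -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
  run-clang-tidy-17 -p build -j "$(nproc)" -fix -quiet 1>output.txt
  ./.githooks/pre-commit   # re-apply formatting, as the workflow does after fixes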


@@ -0,0 +1,29 @@
name: Restart clang-tidy workflow
on:
push:
branches: [develop]
workflow_dispatch:
jobs:
restart_clang_tidy:
runs-on: ubuntu-20.04
permissions:
actions: write
steps:
- uses: actions/checkout@v4
- name: Check last commit matches clang-tidy auto fixes
id: check
shell: bash
run: |
passed=$(if [[ $(git log -1 --pretty=format:%s | grep '\[CI\] clang-tidy auto fixes') ]]; then echo 'true' ; else echo 'false' ; fi)
echo "passed=$passed" >> $GITHUB_OUTPUT
- name: Run clang-tidy workflow
if: ${{ contains(steps.check.outputs.passed, 'true') }}
shell: bash
env:
GH_TOKEN: ${{ github.token }}
GH_REPO: ${{ github.repository }}
run: gh workflow run clang-tidy.yml

.github/workflows/nightly.yml

@@ -0,0 +1,138 @@
name: Nightly release
on:
schedule:
- cron: '0 5 * * 1-5'
workflow_dispatch:
jobs:
build:
name: Build clio
strategy:
fail-fast: false
matrix:
include:
# - os: macOS
# build_type: Release
- os: heavy
build_type: Release
container:
image: rippleci/clio_ci:latest
- os: heavy
build_type: Debug
container:
image: rippleci/clio_ci:latest
runs-on: [self-hosted, "${{ matrix.os }}"]
container: ${{ matrix.container }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Prepare runner
uses: ./.github/actions/prepare_runner
with:
disable_ccache: true
- name: Setup conan
uses: ./.github/actions/setup_conan
id: conan
- name: Run conan and cmake
uses: ./.github/actions/generate
with:
conan_profile: ${{ steps.conan.outputs.conan_profile }}
conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
build_type: ${{ matrix.build_type }}
- name: Build Clio
uses: ./.github/actions/build_clio
- name: Strip tests
run: strip build/clio_tests
- name: Upload clio_tests
uses: actions/upload-artifact@v3
with:
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
path: build/clio_tests
- name: Compress clio_server
shell: bash
run: |
cd build
tar czf ./clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz ./clio_server
- name: Upload clio_server
uses: actions/upload-artifact@v3
with:
name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
path: build/clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz
run_tests:
needs: build
strategy:
fail-fast: false
matrix:
include:
# - os: macOS
# build_type: Release
- os: heavy
build_type: Release
- os: heavy
build_type: Debug
runs-on: [self-hosted, "${{ matrix.os }}"]
steps:
- uses: actions/download-artifact@v3
with:
name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
- name: Run clio_tests
run: |
chmod +x ./clio_tests
./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
nightly_release:
needs: run_tests
runs-on: ubuntu-20.04
env:
GH_REPO: ${{ github.repository }}
GH_TOKEN: ${{ github.token }}
permissions:
contents: write
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v3
with:
path: nightly_release
- name: Prepare files
shell: bash
run: |
cp ${{ github.workspace }}/.github/workflows/nightly_notes.md "${RUNNER_TEMP}/nightly_notes.md"
cd nightly_release
rm -r clio_tests*
for d in $(ls); do
archive_name=$(ls $d)
mv ${d}/${archive_name} ./
rm -r $d
sha256sum ./$archive_name > ./${archive_name}.sha256sum
cat ./$archive_name.sha256sum >> "${RUNNER_TEMP}/nightly_notes.md"
done
echo '```' >> "${RUNNER_TEMP}/nightly_notes.md"
- name: Remove current nightly release and nightly tag
shell: bash
run: |
gh release delete nightly --yes || true
git push origin :nightly || true
- name: Publish nightly release
shell: bash
run: |
gh release create nightly --prerelease --title "Clio development (nightly) build" \
--target $GITHUB_SHA --notes-file "${RUNNER_TEMP}/nightly_notes.md" \
./nightly_release/clio_server*
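A published archive can be checked against the notes locally; a minimal sketch, where the archive name is only an example of the clio_server_<os>_<build_type>.tar.gz pattern produced above:
  sha256sum -c clio_server_Linux_Release.tar.gz.sha256sum
  tar xzf clio_server_Linux_Release.tar.gz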

.github/workflows/nightly_notes.md

@@ -0,0 +1,6 @@
> **Note:** Please remember that this is a development release and it is not recommended for production use.
Changelog (including previous releases): https://github.com/XRPLF/clio/commits/nightly
## SHA256 checksums
```

.github/workflows/update_docker_ci.yml

@@ -0,0 +1,40 @@
name: Update CI docker image
on:
push:
branches: [develop]
paths:
- 'docker/ci/**'
- .github/workflows/update_docker_ci.yml
workflow_dispatch:
jobs:
build_and_push:
name: Build and push docker image
runs-on: ubuntu-20.04
steps:
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USER }}
password: ${{ secrets.DOCKERHUB_PW }}
- uses: actions/checkout@v4
- uses: docker/setup-qemu-action@v3
- uses: docker/setup-buildx-action@v3
- uses: docker/metadata-action@v5
id: meta
with:
images: rippleci/clio_ci
tags: |
type=raw,value=latest
type=raw,value=gcc_11
type=raw,value=${{ env.GITHUB_SHA }}
- name: Build and push
uses: docker/build-push-action@v5
with:
context: ${{ github.workspace }}/docker/ci
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ steps.meta.outputs.tags }}
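The resulting image can then be pulled by any of the tags declared above (a sketch; the commit-SHA tag varies per push):
  docker pull rippleci/clio_ci:latest
  docker pull rippleci/clio_ci:gcc_11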

.gitignore

@@ -1,6 +1,10 @@
*clio*.log
build*/
/build*/
.devcontainer
.build
.cache
.vscode
.python-version
CMakeUserPresets.json
config.json
src/main/impl/Build.cpp


@@ -17,7 +17,9 @@
*/
//==============================================================================
#include <main/Build.h>
#include "main/Build.hpp"
#include <string>
namespace Build {
static constexpr char versionString[] = "@VERSION@";

CMake/Ccache.cmake

@@ -0,0 +1,5 @@
find_program (CCACHE_PATH "ccache")
if (CCACHE_PATH)
set (CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
message (STATUS "Using ccache: ${CCACHE_PATH}")
endif ()
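A quick way to confirm the launcher is picked up is to build twice and watch the cache statistics; a sketch, assuming ccache is installed:
  cmake -B build && cmake --build build -j
  ccache -s    # hit counters should grow on a rebuild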

CMake/CheckCompiler.cmake

@@ -0,0 +1,42 @@
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
message (FATAL_ERROR "Clang 14+ required for building clio")
endif ()
set (is_clang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
message (FATAL_ERROR "AppleClang 14+ required for building clio")
endif ()
set (is_appleclang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
message (FATAL_ERROR "GCC 11+ required for building clio")
endif ()
set (is_gcc TRUE)
else ()
message (FATAL_ERROR "Supported compilers: AppleClang 14+, Clang 14+, GCC 11+")
endif ()
if (san)
string (TOLOWER ${san} san)
set (SAN_FLAG "-fsanitize=${san}")
set (SAN_LIB "")
if (is_gcc)
if (san STREQUAL "address")
set (SAN_LIB "asan")
elseif (san STREQUAL "thread")
set (SAN_LIB "tsan")
elseif (san STREQUAL "memory")
set (SAN_LIB "msan")
elseif (san STREQUAL "undefined")
set (SAN_LIB "ubsan")
endif ()
endif ()
set (_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
set (CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
CHECK_CXX_COMPILER_FLAG (${SAN_FLAG} COMPILER_SUPPORTS_SAN)
set (CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
if (NOT COMPILER_SUPPORTS_SAN)
message (FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
endif ()
endif ()
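The san cache variable is set from the top-level CMakeLists; a minimal configure sketch enabling the address sanitizer:
  cmake -B build -Dsan=address
  cmake --build build -j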

CMake/ClangTidy.cmake

@@ -0,0 +1,31 @@
if (lint)
# Find clang-tidy binary
if (DEFINED ENV{CLIO_CLANG_TIDY_BIN})
set (_CLANG_TIDY_BIN $ENV{CLIO_CLANG_TIDY_BIN})
if ((NOT EXISTS ${_CLANG_TIDY_BIN}) OR IS_DIRECTORY ${_CLANG_TIDY_BIN})
message (FATAL_ERROR "$ENV{CLIO_CLANG_TIDY_BIN} no such file. Check CLIO_CLANG_TIDY_BIN env variable")
endif ()
message (STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
else ()
find_program (_CLANG_TIDY_BIN NAMES "clang-tidy-17" "clang-tidy" REQUIRED)
endif ()
if (NOT _CLANG_TIDY_BIN)
message (FATAL_ERROR
"clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy.")
endif ()
# Support for https://github.com/matus-chochlik/ctcache
find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
if (CLANG_TIDY_CACHE_PATH)
set (_CLANG_TIDY_CMD
"${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_BIN}"
CACHE STRING "A combined command to run clang-tidy with caching wrapper")
else ()
set(_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
endif ()
set (CMAKE_CXX_CLANG_TIDY "${_CLANG_TIDY_CMD};--quiet")
message (STATUS "Using clang-tidy: ${CMAKE_CXX_CLANG_TIDY}")
endif ()
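Linting is driven by the lint option from the top-level CMakeLists; a minimal sketch, where the clang-tidy path is only an example:
  CLIO_CLANG_TIDY_BIN=/usr/lib/llvm-17/bin/clang-tidy cmake -B build -Dlint=ON
  cmake --build build -j    # clang-tidy runs alongside compilation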


@@ -2,32 +2,38 @@
write version to source
#]===================================================================]
find_package(Git REQUIRED)
find_package (Git REQUIRED)
set(GIT_COMMAND rev-parse --short HEAD)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)
set (GIT_COMMAND rev-parse --short HEAD)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)
set(GIT_COMMAND branch --show-current)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)
set (GIT_COMMAND branch --show-current)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)
if(BRANCH STREQUAL "")
set(BRANCH "dev")
endif()
if (BRANCH STREQUAL "")
set (BRANCH "dev")
endif ()
if(NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name YYYYMMDDHMS-<branch>-<git-ref>
execute_process(COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
set(VERSION "${DATE}-${BRANCH}-${REV}")
else()
set(GIT_COMMAND describe --tags)
execute_process(COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND} OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
set(VERSION "${TAG_VERSION}-${REV}")
endif()
if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch name: YYYYMMDDHHMMSS-<branch>-<git-rev>
execute_process (COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
set (VERSION "${DATE}-${BRANCH}-${REV}")
else ()
set (GIT_COMMAND describe --tags)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
set (VERSION "${TAG_VERSION}-${REV}")
endif ()
if(CMAKE_BUILD_TYPE MATCHES Debug)
set(VERSION "${VERSION}+DEBUG")
endif()
if (CMAKE_BUILD_TYPE MATCHES Debug)
set (VERSION "${VERSION}+DEBUG")
endif ()
message(STATUS "Build version: ${VERSION}")
set(clio_version "${VERSION}")
message (STATUS "Build version: ${VERSION}")
set (clio_version "${VERSION}")
configure_file(CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp)
configure_file (CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp)

CMake/CodeCoverage.cmake

@@ -0,0 +1,440 @@
# Copyright (c) 2012 - 2017, Lars Bilke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# CHANGES:
#
# 2012-01-31, Lars Bilke
# - Enable Code Coverage
#
# 2013-09-17, Joakim Söderberg
# - Added support for Clang.
# - Some additional usage instructions.
#
# 2016-02-03, Lars Bilke
# - Refactored functions to use named parameters
#
# 2017-06-02, Lars Bilke
# - Merged with modified version from github.com/ufz/ogs
#
# 2019-05-06, Anatolii Kurotych
# - Remove unnecessary --coverage flag
#
# 2019-12-13, FeRD (Frank Dana)
# - Deprecate COVERAGE_LCOVR_EXCLUDES and COVERAGE_GCOVR_EXCLUDES lists in favor
# of tool-agnostic COVERAGE_EXCLUDES variable, or EXCLUDE setup arguments.
# - CMake 3.4+: All excludes can be specified relative to BASE_DIRECTORY
# - All setup functions: accept BASE_DIRECTORY, EXCLUDE list
# - Set lcov basedir with -b argument
# - Add automatic --demangle-cpp in lcovr, if 'c++filt' is available (can be
# overridden with NO_DEMANGLE option in setup_target_for_coverage_lcovr().)
# - Delete output dir, .info file on 'make clean'
# - Remove Python detection, since version mismatches will break gcovr
# - Minor cleanup (lowercase function names, update examples...)
#
# 2019-12-19, FeRD (Frank Dana)
# - Rename Lcov outputs, make filtered file canonical, fix cleanup for targets
#
# 2020-01-19, Bob Apthorpe
# - Added gfortran support
#
# 2020-02-17, FeRD (Frank Dana)
# - Make all add_custom_target()s VERBATIM to auto-escape wildcard characters
# in EXCLUDEs, and remove manual escaping from gcovr targets
#
# 2021-01-19, Robin Mueller
# - Add CODE_COVERAGE_VERBOSE option which will allow to print out commands which are run
# - Added the option for users to set the GCOVR_ADDITIONAL_ARGS variable to supply additional
# flags to the gcovr command
#
# 2020-05-04, Michael Davis
# - Add -fprofile-abs-path to make gcno files contain absolute paths
# - Fix BASE_DIRECTORY not working when defined
# - Change BYPRODUCT from folder to index.html to stop ninja from complaining about double defines
#
# 2021-05-10, Martin Stump
# - Check if the generator is multi-config before warning about non-Debug builds
#
# 2022-02-22, Marko Wehle
# - Change gcovr output from -o <filename> for --xml <filename> and --html <filename> output respectively.
# This will allow for Multiple Output Formats at the same time by making use of GCOVR_ADDITIONAL_ARGS, e.g. GCOVR_ADDITIONAL_ARGS "--txt".
#
# 2022-09-28, Sebastian Mueller
# - fix append_coverage_compiler_flags_to_target to correctly add flags
# - replace "-fprofile-arcs -ftest-coverage" with "--coverage" (equivalent)
#
# 2023-12-15, Bronek Kozicki
# - remove setup_target_for_coverage_lcov (slow) and setup_target_for_coverage_fastcov (no support for Clang)
# - fix Clang support by adding find_program( ... llvm-cov )
# - add Apple Clang support by adding execute_process( COMMAND xcrun -f llvm-cov ... )
# - add CODE_COVERAGE_GCOV_TOOL to explicitly select gcov tool and disable find_program
# - replace both functions setup_target_for_coverage_gcovr_* with single setup_target_for_coverage_gcovr
# - add support for all gcovr output formats
#
# USAGE:
#
# 1. Copy this file into your cmake modules path.
#
# 2. Add the following line to your CMakeLists.txt (best inside an if-condition
# using a CMake option() to enable it just optionally):
# include(CodeCoverage)
#
# 3. Append necessary compiler flags for all supported source files:
# append_coverage_compiler_flags()
# Or for specific target:
# append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME)
#
# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og
#
# 4. If you need to exclude additional directories from the report, specify them
# using full paths in the COVERAGE_EXCLUDES variable before calling
# setup_target_for_coverage_*().
# Example:
# set(COVERAGE_EXCLUDES
# '${PROJECT_SOURCE_DIR}/src/dir1/*'
# '/path/to/my/src/dir2/*')
# Or, use the EXCLUDE argument to setup_target_for_coverage_*().
# Example:
# setup_target_for_coverage_gcovr(
# NAME coverage
# EXECUTABLE testrunner
# EXCLUDE "${PROJECT_SOURCE_DIR}/src/dir1/*" "/path/to/my/src/dir2/*")
#
# 4.a NOTE: With CMake 3.4+, COVERAGE_EXCLUDES or EXCLUDE can also be set
# relative to the BASE_DIRECTORY (default: PROJECT_SOURCE_DIR)
# Example:
# set(COVERAGE_EXCLUDES "dir1/*")
# setup_target_for_coverage_gcovr(
# NAME coverage
# EXECUTABLE testrunner
# FORMAT html-details
# BASE_DIRECTORY "${PROJECT_SOURCE_DIR}/src"
# EXCLUDE "dir2/*")
#
# 4.b If you need to pass specific options to gcovr, specify them in
# GCOVR_ADDITIONAL_ARGS variable.
# Example:
# set (GCOVR_ADDITIONAL_ARGS --exclude-throw-branches --exclude-noncode-lines -s)
# setup_target_for_coverage_gcovr(
# NAME coverage
# EXECUTABLE testrunner
# EXCLUDE "src/dir1" "src/dir2")
#
# 5. Use the functions described below to create a custom make target which
# runs your test executable and produces a code coverage report.
#
# 6. Build a Debug build:
# cmake -DCMAKE_BUILD_TYPE=Debug ..
# make
# make my_coverage_target
include(CMakeParseArguments)
option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)
# Check prereqs
find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)
if (DEFINED CODE_COVERAGE_GCOV_TOOL)
set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
elseif (DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
if (APPLE)
execute_process( COMMAND xcrun -f llvm-cov
OUTPUT_VARIABLE LLVMCOV_PATH
OUTPUT_STRIP_TRAILING_WHITESPACE
)
else()
find_program( LLVMCOV_PATH llvm-cov )
endif()
if(LLVMCOV_PATH)
set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
endif()
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
find_program( GCOV_PATH gcov )
set(GCOV_TOOL "${GCOV_PATH}")
endif()
# Check supported compiler (Clang, GNU and Flang)
get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
foreach(LANG ${LANGUAGES})
if("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
if("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
endif()
elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
endif()
endforeach()
set(COVERAGE_COMPILER_FLAGS "-g --coverage"
CACHE INTERNAL "")
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
if(HAVE_cxx_fprofile_abs_path)
set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif()
include(CheckCCompilerFlag)
check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
if(HAVE_c_fprofile_abs_path)
set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
endif()
endif()
set(CMAKE_Fortran_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the Fortran compiler during coverage builds."
FORCE )
set(CMAKE_CXX_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C++ compiler during coverage builds."
FORCE )
set(CMAKE_C_FLAGS_COVERAGE
${COVERAGE_COMPILER_FLAGS}
CACHE STRING "Flags used by the C compiler during coverage builds."
FORCE )
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used for linking binaries during coverage builds."
FORCE )
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
""
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
FORCE )
mark_as_advanced(
CMAKE_Fortran_FLAGS_COVERAGE
CMAKE_CXX_FLAGS_COVERAGE
CMAKE_C_FLAGS_COVERAGE
CMAKE_EXE_LINKER_FLAGS_COVERAGE
CMAKE_SHARED_LINKER_FLAGS_COVERAGE )
get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
link_libraries(gcov)
endif()
# Defines a target for running and collection code coverage information
# Builds dependencies, runs the given executable and outputs reports.
# NOTE! The executable should always have a ZERO as exit code otherwise
# the coverage generation will not complete.
#
# setup_target_for_coverage_gcovr(
# NAME ctest_coverage # New target name
# EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR
# DEPENDENCIES executable_target # Dependencies to build first
# BASE_DIRECTORY "../" # Base directory for report
# # (defaults to PROJECT_SOURCE_DIR)
# FORMAT "cobertura" # Output format, one of:
# # xml cobertura sonarqube json-summary
# # json-details coveralls csv txt
# # html-single html-nested html-details
# # (xml is an alias to cobertura;
# # if no format is set, defaults to xml)
# EXCLUDE "src/dir1/*" "src/dir2/*" # Patterns to exclude (can be relative
# # to BASE_DIRECTORY, with CMake 3.4+)
# )
# The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the
# GCOVR command.
function(setup_target_for_coverage_gcovr)
set(options NONE)
set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
if(NOT GCOV_TOOL)
message(FATAL_ERROR "Could not find gcov or llvm-cov tool! Aborting...")
endif()
if(NOT GCOVR_PATH)
message(FATAL_ERROR "Could not find gcovr tool! Aborting...")
endif()
# Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
if(DEFINED Coverage_BASE_DIRECTORY)
get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
else()
set(BASEDIR ${PROJECT_SOURCE_DIR})
endif()
if(NOT DEFINED Coverage_FORMAT)
set(Coverage_FORMAT xml)
endif()
if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
else()
if((Coverage_FORMAT STREQUAL "html-details")
OR (Coverage_FORMAT STREQUAL "html-nested"))
set(GCOVR_OUTPUT_FILE ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html)
set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
elseif(Coverage_FORMAT STREQUAL "html-single")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
elseif((Coverage_FORMAT STREQUAL "json-summary")
OR (Coverage_FORMAT STREQUAL "json-details")
OR (Coverage_FORMAT STREQUAL "coveralls"))
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
elseif(Coverage_FORMAT STREQUAL "txt")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
elseif(Coverage_FORMAT STREQUAL "csv")
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
else()
set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
endif()
endif()
if ((Coverage_FORMAT STREQUAL "cobertura")
OR (Coverage_FORMAT STREQUAL "xml"))
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}" )
list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty )
set(Coverage_FORMAT cobertura) # overwrite xml
elseif(Coverage_FORMAT STREQUAL "sonarqube")
list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}" )
elseif(Coverage_FORMAT STREQUAL "json-summary")
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}" )
list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
elseif(Coverage_FORMAT STREQUAL "json-details")
list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}" )
list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
elseif(Coverage_FORMAT STREQUAL "coveralls")
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}" )
list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
elseif(Coverage_FORMAT STREQUAL "csv")
list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}" )
elseif(Coverage_FORMAT STREQUAL "txt")
list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}" )
elseif(Coverage_FORMAT STREQUAL "html-single")
list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}" )
list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
elseif(Coverage_FORMAT STREQUAL "html-nested")
list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}" )
elseif(Coverage_FORMAT STREQUAL "html-details")
list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}" )
else()
message(FATAL_ERROR "Unsupported output style ${Coverage_FORMAT}! Aborting...")
endif()
# Collect excludes (CMake 3.4+: Also compute absolute paths)
set(GCOVR_EXCLUDES "")
foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
if(CMAKE_VERSION VERSION_GREATER 3.4)
get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
endif()
list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
endforeach()
list(REMOVE_DUPLICATES GCOVR_EXCLUDES)
# Combine excludes to several -e arguments
set(GCOVR_EXCLUDE_ARGS "")
foreach(EXCLUDE ${GCOVR_EXCLUDES})
list(APPEND GCOVR_EXCLUDE_ARGS "-e")
list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
endforeach()
# Set up commands which will be run to generate coverage data
# Run tests
set(GCOVR_EXEC_TESTS_CMD
${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
)
# Create folder
if(DEFINED GCOVR_CREATE_FOLDER)
set(GCOVR_FOLDER_CMD
${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
else()
set(GCOVR_FOLDER_CMD echo) # dummy
endif()
# Running gcovr
set(GCOVR_CMD
${GCOVR_PATH}
--gcov-executable ${GCOV_TOOL}
--gcov-ignore-parse-errors=negative_hits.warn_once_per_file
-r ${BASEDIR}
${GCOVR_ADDITIONAL_ARGS}
${GCOVR_EXCLUDE_ARGS}
--object-directory=${PROJECT_BINARY_DIR}
)
if(CODE_COVERAGE_VERBOSE)
message(STATUS "Executed command report")
message(STATUS "Command to run tests: ")
string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")
if(NOT GCOVR_FOLDER_CMD STREQUAL "echo")
message(STATUS "Command to create a folder: ")
string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
endif()
message(STATUS "Command to generate gcovr coverage data: ")
string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
message(STATUS "${GCOVR_CMD_SPACED}")
endif()
add_custom_target(${Coverage_NAME}
COMMAND ${GCOVR_EXEC_TESTS_CMD}
COMMAND ${GCOVR_FOLDER_CMD}
COMMAND ${GCOVR_CMD}
BYPRODUCTS ${GCOVR_OUTPUT_FILE}
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
DEPENDS ${Coverage_DEPENDENCIES}
VERBATIM # Protect arguments to commands
COMMENT "Running gcovr to produce code coverage report."
)
# Show info where to find the report
add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
COMMAND ;
COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
)
endfunction() # setup_target_for_coverage_gcovr
function(append_coverage_compiler_flags)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
endfunction() # append_coverage_compiler_flags
# Setup coverage for specific library
function(append_coverage_compiler_flags_to_target name)
separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
target_compile_options(${name} PRIVATE ${_flag_list})
if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
target_link_libraries(${name} PRIVATE gcov)
endif()
endfunction()
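Driving this module from the command line amounts to enabling the coverage options and building the generated target; a minimal sketch, where the target name clio_tests-ccov is only an example of whatever NAME the project passes to setup_target_for_coverage_gcovr:
  cmake -B build -DCMAKE_BUILD_TYPE=Debug -Dtests=ON -Dcoverage=ON
  cmake --build build --target clio_tests-ccov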

CMake/Docs.cmake

@@ -0,0 +1,11 @@
find_package (Doxygen REQUIRED)
set (DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile)
set (DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)
configure_file (${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
add_custom_target (docs
COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Generating API documentation with Doxygen"
VERBATIM)
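Assuming the top-level CMakeLists includes this module when the docs option is on, the documentation can be generated with (a sketch; requires Doxygen):
  cmake -B build -Ddocs=ON
  cmake --build build --target docs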

CMake/Settings.cmake

@@ -0,0 +1,53 @@
set(COMPILER_FLAGS
-Wall
-Wcast-align
-Wdouble-promotion
-Wextra
-Werror
-Wformat=2
-Wimplicit-fallthrough
-Wmisleading-indentation
-Wno-narrowing
-Wno-deprecated-declarations
-Wno-dangling-else
-Wno-unused-but-set-variable
-Wnon-virtual-dtor
-Wnull-dereference
-Wold-style-cast
-pedantic
-Wpedantic
-Wunused
# FIXME: The following bunch are needed for gcc12 atm.
-Wno-missing-requires
-Wno-restrict
-Wno-null-dereference
-Wno-maybe-uninitialized
-Wno-unknown-warning-option # and this to work with clang
# TODO: Address these and others in https://github.com/XRPLF/clio/issues/1273
)
#TODO: reenable when we change CI #884
# if (is_gcc AND NOT lint)
# list(APPEND COMPILER_FLAGS
# -Wduplicated-branches
# -Wduplicated-cond
# -Wlogical-op
# -Wuseless-cast
# )
# endif ()
if (is_clang)
list(APPEND COMPILER_FLAGS
-Wshadow # gcc is too aggressive with shadowing https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
)
endif ()
if (is_appleclang)
list(APPEND COMPILER_FLAGS
-Wreorder-init-list
)
endif ()
# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for the flags description
target_compile_options (clio PUBLIC ${COMPILER_FLAGS})


@@ -0,0 +1,11 @@
include (CheckIncludeFileCXX)
check_include_file_cxx ("source_location" SOURCE_LOCATION_AVAILABLE)
if (SOURCE_LOCATION_AVAILABLE)
target_compile_definitions (clio PUBLIC "HAS_SOURCE_LOCATION")
endif ()
check_include_file_cxx ("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if (EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
target_compile_definitions (clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif ()


@@ -1,126 +0,0 @@
# call add_converage(module_name) to add coverage targets for the given module
function(add_converage module)
if("${CMAKE_C_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang"
OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
message("[Coverage] Building with llvm Code Coverage Tools")
# Using llvm gcov ; llvm install by xcode
set(LLVM_COV_PATH /Library/Developer/CommandLineTools/usr/bin)
if(NOT EXISTS ${LLVM_COV_PATH}/llvm-cov)
message(FATAL_ERROR "llvm-cov not found! Aborting.")
endif()
# set Flags
target_compile_options(${module} PRIVATE -fprofile-instr-generate
-fcoverage-mapping)
target_link_options(${module} PUBLIC -fprofile-instr-generate
-fcoverage-mapping)
target_compile_options(clio PRIVATE -fprofile-instr-generate
-fcoverage-mapping)
target_link_options(clio PUBLIC -fprofile-instr-generate
-fcoverage-mapping)
# llvm-cov
add_custom_target(
${module}-ccov-preprocessing
COMMAND LLVM_PROFILE_FILE=${module}.profraw $<TARGET_FILE:${module}>
COMMAND ${LLVM_COV_PATH}/llvm-profdata merge -sparse ${module}.profraw -o
${module}.profdata
DEPENDS ${module})
add_custom_target(
${module}-ccov-show
COMMAND ${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
-instr-profile=${module}.profdata -show-line-counts-or-regions
DEPENDS ${module}-ccov-preprocessing)
# add summary for CI parse
add_custom_target(
${module}-ccov-report
COMMAND
${LLVM_COV_PATH}/llvm-cov report $<TARGET_FILE:${module}>
-instr-profile=${module}.profdata
-ignore-filename-regex=".*_makefiles|.*unittests|.*_deps"
-show-region-summary=false
DEPENDS ${module}-ccov-preprocessing)
# exclude libs and unittests self
add_custom_target(
${module}-ccov
COMMAND
${LLVM_COV_PATH}/llvm-cov show $<TARGET_FILE:${module}>
-instr-profile=${module}.profdata -show-line-counts-or-regions
-output-dir=${module}-llvm-cov -format="html"
-ignore-filename-regex=".*_makefiles|.*unittests|.*_deps" > /dev/null 2>&1
DEPENDS ${module}-ccov-preprocessing)
add_custom_command(
TARGET ${module}-ccov
POST_BUILD
COMMENT
"Open ${module}-llvm-cov/index.html in your browser to view the coverage report."
)
elseif("${CMAKE_C_COMPILER_ID}" MATCHES "GNU" OR "${CMAKE_CXX_COMPILER_ID}"
MATCHES "GNU")
message("[Coverage] Building with Gcc Code Coverage Tools")
find_program(GCOV_PATH gcov)
if(NOT GCOV_PATH)
message(FATAL_ERROR "gcov not found! Aborting...")
endif() # NOT GCOV_PATH
find_program(GCOVR_PATH gcovr)
if(NOT GCOVR_PATH)
message(FATAL_ERROR "gcovr not found! Aborting...")
endif() # NOT GCOVR_PATH
set(COV_OUTPUT_PATH ${module}-gcc-cov)
target_compile_options(${module} PRIVATE -fprofile-arcs -ftest-coverage
-fPIC)
target_link_libraries(${module} PRIVATE gcov)
target_compile_options(clio PRIVATE -fprofile-arcs -ftest-coverage
-fPIC)
target_link_libraries(clio PRIVATE gcov)
# this target is used for CI as well generate the summary out.xml will send
# to github action to generate markdown, we can paste it to comments or
# readme
add_custom_target(
${module}-ccov
COMMAND ${module} ${TEST_PARAMETER}
COMMAND rm -rf ${COV_OUTPUT_PATH}
COMMAND mkdir ${COV_OUTPUT_PATH}
COMMAND
gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR} -x
${COV_OUTPUT_PATH}/out.xml --exclude='${CMAKE_SOURCE_DIR}/unittests/'
--exclude='${PROJECT_BINARY_DIR}/'
COMMAND
gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR}
--html ${COV_OUTPUT_PATH}/report.html
--exclude='${CMAKE_SOURCE_DIR}/unittests/'
--exclude='${PROJECT_BINARY_DIR}/'
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
COMMENT "Running gcovr to produce Cobertura code coverage report.")
# generate the detail report
add_custom_target(
${module}-ccov-report
COMMAND ${module} ${TEST_PARAMETER}
COMMAND rm -rf ${COV_OUTPUT_PATH}
COMMAND mkdir ${COV_OUTPUT_PATH}
COMMAND
gcovr -r ${CMAKE_SOURCE_DIR} --object-directory=${PROJECT_BINARY_DIR}
--html-details ${COV_OUTPUT_PATH}/index.html
--exclude='${CMAKE_SOURCE_DIR}/unittests/'
--exclude='${PROJECT_BINARY_DIR}/'
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
COMMENT "Running gcovr to produce Cobertura code coverage report.")
add_custom_command(
TARGET ${module}-ccov-report
POST_BUILD
COMMENT
"Open ${COV_OUTPUT_PATH}/index.html in your browser to view the coverage report."
)
else()
message(FATAL_ERROR "Complier not support yet")
endif()
endfunction()


@@ -1,6 +1,11 @@
set(Boost_USE_STATIC_LIBS ON)
set(Boost_USE_STATIC_RUNTIME ON)
set (Boost_USE_STATIC_LIBS ON)
set (Boost_USE_STATIC_RUNTIME ON)
find_package(Boost 1.75 COMPONENTS filesystem log_setup log thread system REQUIRED)
target_link_libraries(clio PUBLIC ${Boost_LIBRARIES})
find_package (Boost 1.82 REQUIRED
COMPONENTS
program_options
coroutine
system
log
log_setup
)

CMake/deps/OpenSSL.cmake

@@ -0,0 +1,5 @@
find_package (OpenSSL 1.1.1 REQUIRED)
set_target_properties (OpenSSL::SSL PROPERTIES
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)


@@ -1,24 +0,0 @@
From 5cd9d09d960fa489a0c4379880cd7615b1c16e55 Mon Sep 17 00:00:00 2001
From: CJ Cobb <ccobb@ripple.com>
Date: Wed, 10 Aug 2022 12:30:01 -0400
Subject: [PATCH] Remove bitset operator !=
---
src/ripple/protocol/Feature.h | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h
index b3ecb099b..6424be411 100644
--- a/src/ripple/protocol/Feature.h
+++ b/src/ripple/protocol/Feature.h
@@ -126,7 +126,6 @@ class FeatureBitset : private std::bitset<detail::numFeatures>
public:
using base::bitset;
using base::operator==;
- using base::operator!=;
using base::all;
using base::any;
--
2.32.0


@@ -1,11 +0,0 @@
include(CheckIncludeFileCXX)
check_include_file_cxx("source_location" SOURCE_LOCATION_AVAILABLE)
if(SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio PUBLIC "HAS_SOURCE_LOCATION")
endif()
check_include_file_cxx("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if(EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
target_compile_definitions(clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif()

CMake/deps/Threads.cmake

@@ -0,0 +1,2 @@
set (THREADS_PREFER_PTHREAD_FLAG ON)
find_package (Threads)


@@ -1,153 +1 @@
find_package(ZLIB REQUIRED)
find_library(cassandra NAMES cassandra)
if(NOT cassandra)
message("System installed Cassandra cpp driver not found. Will build")
find_library(zlib NAMES zlib1g-dev zlib-devel zlib z)
if(NOT zlib)
message("zlib not found. will build")
add_library(zlib STATIC IMPORTED GLOBAL)
ExternalProject_Add(zlib_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/madler/zlib.git
GIT_TAG v1.2.12
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
)
ExternalProject_Get_Property (zlib_src SOURCE_DIR)
ExternalProject_Get_Property (zlib_src BINARY_DIR)
set (zlib_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${zlib_src_SOURCE_DIR}/include)
set_target_properties (zlib PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
add_dependencies(zlib zlib_src)
file(TO_CMAKE_PATH "${zlib_src_SOURCE_DIR}" zlib_src_SOURCE_DIR)
endif()
find_library(krb5 NAMES krb5-dev libkrb5-dev)
if(NOT krb5)
message("krb5 not found. will build")
add_library(krb5 STATIC IMPORTED GLOBAL)
ExternalProject_Add(krb5_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/krb5/krb5.git
GIT_TAG krb5-1.20
UPDATE_COMMAND ""
CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared
BUILD_IN_SOURCE 1
BUILD_COMMAND make
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <SOURCE_DIR>/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
)
message(${ep_lib_prefix}/krb5.a)
message(${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a)
ExternalProject_Get_Property (krb5_src SOURCE_DIR)
ExternalProject_Get_Property (krb5_src BINARY_DIR)
set (krb5_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${krb5_src_SOURCE_DIR}/include)
set_target_properties (krb5 PROPERTIES
IMPORTED_LOCATION
${SOURCE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
add_dependencies(krb5 krb5_src)
file(TO_CMAKE_PATH "${krb5_src_SOURCE_DIR}" krb5_src_SOURCE_DIR)
endif()
find_library(libuv1 NAMES uv1 libuv1 liubuv1-dev libuv1:amd64)
if(NOT libuv1)
message("libuv1 not found, will build")
add_library(libuv1 STATIC IMPORTED GLOBAL)
ExternalProject_Add(libuv_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/libuv/libuv.git
GIT_TAG v1.44.1
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
)
ExternalProject_Get_Property (libuv_src SOURCE_DIR)
ExternalProject_Get_Property (libuv_src BINARY_DIR)
set (libuv_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${libuv_src_SOURCE_DIR}/include)
set_target_properties (libuv1 PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
add_dependencies(libuv1 libuv_src)
file(TO_CMAKE_PATH "${libuv_src_SOURCE_DIR}" libuv_src_SOURCE_DIR)
endif()
add_library (cassandra STATIC IMPORTED GLOBAL)
ExternalProject_Add(cassandra_src
PREFIX ${nih_cache_path}
GIT_REPOSITORY https://github.com/datastax/cpp-driver.git
GIT_TAG 2.16.2
CMAKE_ARGS
-DLIBUV_ROOT_DIR=${BINARY_DIR}
-DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
-DCASS_BUILD_STATIC=ON
-DCASS_BUILD_SHARED=OFF
INSTALL_COMMAND ""
BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
)
ExternalProject_Get_Property (cassandra_src SOURCE_DIR)
ExternalProject_Get_Property (cassandra_src BINARY_DIR)
set (cassandra_src_SOURCE_DIR "${SOURCE_DIR}")
file (MAKE_DIRECTORY ${cassandra_src_SOURCE_DIR}/include)
set_target_properties (cassandra PROPERTIES
IMPORTED_LOCATION
${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
INTERFACE_INCLUDE_DIRECTORIES
${SOURCE_DIR}/include)
message("cass dirs")
message(${BINARY_DIR})
message(${SOURCE_DIR})
message(${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a)
add_dependencies(cassandra cassandra_src)
if(NOT libuv1)
ExternalProject_Add_StepDependencies(cassandra_src build libuv1)
target_link_libraries(cassandra INTERFACE libuv1)
else()
target_link_libraries(cassandra INTERFACE ${libuv1})
endif()
if(NOT krb5)
ExternalProject_Add_StepDependencies(cassandra_src build krb5)
target_link_libraries(cassandra INTERFACE krb5)
else()
target_link_libraries(cassandra INTERFACE ${krb5})
endif()
if(NOT zlib)
ExternalProject_Add_StepDependencies(cassandra_src build zlib)
target_link_libraries(cassandra INTERFACE zlib)
else()
target_link_libraries(cassandra INTERFACE ${zlib})
endif()
set(OPENSSL_USE_STATIC_LIBS TRUE)
find_package(OpenSSL REQUIRED)
target_link_libraries(cassandra INTERFACE OpenSSL::SSL)
file(TO_CMAKE_PATH "${cassandra_src_SOURCE_DIR}" cassandra_src_SOURCE_DIR)
target_link_libraries(clio PUBLIC cassandra)
else()
message("Found system installed cassandra cpp driver")
message(${cassandra})
find_path(cassandra_includes NAMES cassandra.h REQUIRED)
message(${cassandra_includes})
get_filename_component(CASSANDRA_HEADER ${cassandra_includes}/cassandra.h REALPATH)
get_filename_component(CASSANDRA_HEADER_DIR ${CASSANDRA_HEADER} DIRECTORY)
target_link_libraries (clio PUBLIC ${cassandra})
target_include_directories(clio PUBLIC ${CASSANDRA_HEADER_DIR})
endif()
find_package (cassandra-cpp-driver REQUIRED)


@@ -1,20 +1,4 @@
FetchContent_Declare(
googletest
URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip
)
find_package (GTest REQUIRED)
FetchContent_GetProperties(googletest)
if(NOT googletest_POPULATED)
FetchContent_Populate(googletest)
add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
target_link_libraries(clio_tests PUBLIC clio gmock_main)
target_include_directories(clio_tests PRIVATE unittests)
enable_testing()
include(GoogleTest)
gtest_discover_tests(clio_tests)
enable_testing ()
include (GoogleTest)


@@ -0,0 +1,3 @@
target_compile_definitions(clio PUBLIC BOOST_STACKTRACE_LINK)
target_compile_definitions(clio PUBLIC BOOST_STACKTRACE_USE_BACKTRACE)
find_package(libbacktrace REQUIRED)


@@ -1,14 +1 @@
FetchContent_Declare(
libfmt
URL https://github.com/fmtlib/fmt/releases/download/9.1.0/fmt-9.1.0.zip
)
FetchContent_GetProperties(libfmt)
if(NOT libfmt_POPULATED)
FetchContent_Populate(libfmt)
add_subdirectory(${libfmt_SOURCE_DIR} ${libfmt_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
target_link_libraries(clio PUBLIC fmt)
find_package (fmt REQUIRED)

CMake/deps/libxrpl.cmake

@@ -0,0 +1 @@
find_package (xrpl REQUIRED)


@@ -1,20 +0,0 @@
set(RIPPLED_REPO "https://github.com/ripple/rippled.git")
set(RIPPLED_BRANCH "1.9.2")
set(NIH_CACHE_ROOT "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
set(patch_command ! grep operator!= src/ripple/protocol/Feature.h || git apply < ${CMAKE_CURRENT_SOURCE_DIR}/CMake/deps/Remove-bitset-operator.patch)
message(STATUS "Cloning ${RIPPLED_REPO} branch ${RIPPLED_BRANCH}")
FetchContent_Declare(rippled
GIT_REPOSITORY "${RIPPLED_REPO}"
GIT_TAG "${RIPPLED_BRANCH}"
GIT_SHALLOW ON
PATCH_COMMAND "${patch_command}"
)
FetchContent_GetProperties(rippled)
if(NOT rippled_POPULATED)
FetchContent_Populate(rippled)
add_subdirectory(${rippled_SOURCE_DIR} ${rippled_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()
target_link_libraries(clio PUBLIC xrpl_core grpc_pbufs)
target_include_directories(clio PUBLIC ${rippled_SOURCE_DIR}/src ) # TODO: Seems like this shouldn't be needed?


@@ -1,16 +1,14 @@
set(CLIO_INSTALL_DIR "/opt/clio")
set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
set (CLIO_INSTALL_DIR "/opt/clio")
set (CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
install(TARGETS clio_server DESTINATION bin)
# install(TARGETS clio_tests DESTINATION bin) # NOTE: Do we want to install the tests?
install (TARGETS clio_server DESTINATION bin)
#install(FILES example-config.json DESTINATION etc RENAME config.json)
file(READ example-config.json config)
string(REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file(WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install(FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
file (READ example-config.json config)
string (REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file (WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install (FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)
configure_file("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
configure_file ("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")
install(FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)
install (FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)


@@ -1,6 +0,0 @@
target_compile_options(clio
PUBLIC -Wall
-Werror
-Wno-narrowing
-Wno-deprecated-declarations
-Wno-dangling-else)


@@ -1,75 +1,131 @@
cmake_minimum_required(VERSION 3.16.3)
project(clio)
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
message(FATAL_ERROR "GCC 11+ required for building clio")
# ========================================================================== #
# Options #
# ========================================================================== #
option (verbose "Verbose build" FALSE)
option (tests "Build tests" FALSE)
option (docs "Generate doxygen docs" FALSE)
option (coverage "Build test coverage report" FALSE)
option (packaging "Create distribution packages" FALSE)
option (lint "Run clang-tidy checks during compilation" FALSE)
# ========================================================================== #
set (san "" CACHE STRING "Add sanitizer instrumentation")
set (CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
set_property (CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
# ========================================================================== #
# Include required modules
include (CMake/Ccache.cmake)
include (CheckCXXCompilerFlag)
include (CMake/ClangTidy.cmake)
# Set coverage build options
if (tests AND coverage)
include (CMake/CodeCoverage.cmake)
append_coverage_compiler_flags()
endif()
option(BUILD_TESTS "Build tests" TRUE)
if (verbose)
set (CMAKE_VERBOSE_MAKEFILE TRUE)
endif ()
option(VERBOSE "Verbose build" TRUE)
if(VERBOSE)
set(CMAKE_VERBOSE_MAKEFILE TRUE)
set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()")
endif()
if (packaging)
add_definitions (-DPKG=1)
endif ()
add_library (clio)
if(PACKAGING)
add_definitions(-DPKG=1)
endif()
# Clio tweaks and checks
include (CMake/CheckCompiler.cmake)
include (CMake/Settings.cmake)
include (CMake/ClioVersion.cmake)
include (CMake/SourceLocation.cmake)
#c++20 removed std::result_of but boost 1.75 is still using it.
add_definitions(-DBOOST_ASIO_HAS_STD_INVOKE_RESULT=1)
# Clio deps
include (CMake/deps/libxrpl.cmake)
include (CMake/deps/Boost.cmake)
include (CMake/deps/OpenSSL.cmake)
include (CMake/deps/Threads.cmake)
include (CMake/deps/libfmt.cmake)
include (CMake/deps/cassandra.cmake)
include (CMake/deps/libbacktrace.cmake)
add_library(clio)
target_compile_features(clio PUBLIC cxx_std_20)
target_include_directories(clio PUBLIC src)
# TODO: Include directory will be wrong when installed.
target_include_directories (clio PUBLIC src)
target_compile_features (clio PUBLIC cxx_std_20)
include(FetchContent)
include(ExternalProject)
include(CMake/settings.cmake)
include(CMake/ClioVersion.cmake)
include(CMake/deps/rippled.cmake)
include(CMake/deps/libfmt.cmake)
include(CMake/deps/Boost.cmake)
include(CMake/deps/cassandra.cmake)
include(CMake/deps/SourceLocation.cmake)
target_link_libraries (clio
PUBLIC Boost::boost
PUBLIC Boost::coroutine
PUBLIC Boost::program_options
PUBLIC Boost::system
PUBLIC Boost::log
PUBLIC Boost::log_setup
PUBLIC Boost::stacktrace_backtrace
PUBLIC cassandra-cpp-driver::cassandra-cpp-driver
PUBLIC fmt::fmt
PUBLIC OpenSSL::Crypto
PUBLIC OpenSSL::SSL
PUBLIC xrpl::libxrpl
PUBLIC dl
PUBLIC libbacktrace::libbacktrace
target_sources(clio PRIVATE
INTERFACE Threads::Threads
)
if (is_gcc)
# FIXME: needed on gcc for now
target_compile_definitions (clio PUBLIC BOOST_ASIO_DISABLE_CONCEPTS)
endif ()
target_sources (clio PRIVATE
## Main
src/main/impl/Build.cpp
## Backend
src/backend/BackendInterface.cpp
src/backend/CassandraBackend.cpp
src/backend/SimpleCache.cpp
## NextGen Backend
src/backend/cassandra/impl/Future.cpp
src/backend/cassandra/impl/Cluster.cpp
src/backend/cassandra/impl/Batch.cpp
src/backend/cassandra/impl/Result.cpp
src/backend/cassandra/impl/Tuple.cpp
src/backend/cassandra/impl/SslContext.cpp
src/backend/cassandra/Handle.cpp
src/backend/cassandra/SettingsProvider.cpp
src/data/BackendCounters.cpp
src/data/BackendInterface.cpp
src/data/LedgerCache.cpp
src/data/cassandra/impl/Future.cpp
src/data/cassandra/impl/Cluster.cpp
src/data/cassandra/impl/Batch.cpp
src/data/cassandra/impl/Result.cpp
src/data/cassandra/impl/Tuple.cpp
src/data/cassandra/impl/SslContext.cpp
src/data/cassandra/Handle.cpp
src/data/cassandra/SettingsProvider.cpp
## ETL
src/etl/ETLSource.cpp
src/etl/ProbingETLSource.cpp
src/etl/Source.cpp
src/etl/ProbingSource.cpp
src/etl/NFTHelpers.cpp
src/etl/ReportingETL.cpp
## Subscriptions
src/subscriptions/SubscriptionManager.cpp
src/etl/ETLService.cpp
src/etl/ETLState.cpp
src/etl/LoadBalancer.cpp
src/etl/impl/ForwardCache.cpp
## Feed
src/feed/SubscriptionManager.cpp
src/feed/impl/TransactionFeed.cpp
src/feed/impl/LedgerFeed.cpp
src/feed/impl/ProposedTransactionFeed.cpp
src/feed/impl/SingleFeedBase.cpp
## Web
src/web/impl/AdminVerificationStrategy.cpp
src/web/IntervalSweepHandler.cpp
src/web/Resolver.cpp
## RPC
src/rpc/Errors.cpp
src/rpc/RPC.cpp
src/rpc/Factories.cpp
src/rpc/AMMHelpers.cpp
src/rpc/RPCHelpers.cpp
src/rpc/Counters.cpp
src/rpc/WorkQueue.cpp
src/rpc/common/Specs.cpp
src/rpc/common/Validators.cpp
# RPC impl
src/rpc/common/MetaProcessors.cpp
src/rpc/common/impl/APIVersionParser.cpp
src/rpc/common/impl/HandlerProvider.cpp
## RPC handler
## RPC handlers
src/rpc/handlers/AccountChannels.cpp
src/rpc/handlers/AccountCurrencies.cpp
src/rpc/handlers/AccountInfo.cpp
@@ -78,13 +134,16 @@ target_sources(clio PRIVATE
src/rpc/handlers/AccountObjects.cpp
src/rpc/handlers/AccountOffers.cpp
src/rpc/handlers/AccountTx.cpp
src/rpc/handlers/AMMInfo.cpp
src/rpc/handlers/BookChanges.cpp
src/rpc/handlers/BookOffers.cpp
src/rpc/handlers/DepositAuthorized.cpp
src/rpc/handlers/GatewayBalances.cpp
src/rpc/handlers/Ledger.cpp
src/rpc/handlers/LedgerData.cpp
src/rpc/handlers/LedgerEntry.cpp
src/rpc/handlers/LedgerRange.cpp
src/rpc/handlers/NFTsByIssuer.cpp
src/rpc/handlers/NFTBuyOffers.cpp
src/rpc/handlers/NFTHistory.cpp
src/rpc/handlers/NFTInfo.cpp
@@ -93,83 +152,209 @@ target_sources(clio PRIVATE
src/rpc/handlers/NoRippleCheck.cpp
src/rpc/handlers/Random.cpp
src/rpc/handlers/TransactionEntry.cpp
src/rpc/handlers/Tx.cpp
## Util
src/config/Config.cpp
src/log/Logger.cpp
src/util/Taggable.cpp)
src/util/config/Config.cpp
src/util/log/Logger.cpp
src/util/prometheus/Http.cpp
src/util/prometheus/Label.cpp
src/util/prometheus/MetricBase.cpp
src/util/prometheus/MetricBuilder.cpp
src/util/prometheus/MetricsFamily.cpp
src/util/prometheus/OStream.cpp
src/util/prometheus/Prometheus.cpp
src/util/Random.cpp
src/util/requests/RequestBuilder.cpp
src/util/requests/Types.cpp
src/util/requests/WsConnection.cpp
src/util/requests/impl/SslContext.cpp
src/util/Taggable.cpp
src/util/TerminationHandler.cpp
src/util/TxUtils.cpp
src/util/LedgerUtils.cpp
)
add_executable(clio_server src/main/main.cpp)
target_link_libraries(clio_server PUBLIC clio)
# Clio server
add_executable (clio_server src/main/Main.cpp)
target_link_libraries (clio_server PRIVATE clio)
target_link_options(clio_server
PRIVATE
$<$<AND:$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${san}>>>:-static-libstdc++ -static-libgcc>
)
if(BUILD_TESTS)
set(TEST_TARGET clio_tests)
add_executable(${TEST_TARGET}
# Unittesting
if (tests)
set (TEST_TARGET clio_tests)
add_executable (${TEST_TARGET}
# Common
unittests/Main.cpp
unittests/Playground.cpp
unittests/Backend.cpp
unittests/Logger.cpp
unittests/Config.cpp
unittests/ProfilerTest.cpp
unittests/DOSGuard.cpp
unittests/SubscriptionTest.cpp
unittests/SubscriptionManagerTest.cpp
unittests/LoggerTests.cpp
unittests/ConfigTests.cpp
unittests/ProfilerTests.cpp
unittests/JsonUtilTests.cpp
unittests/DOSGuardTests.cpp
unittests/util/TestGlobals.cpp
unittests/util/AssertTests.cpp
unittests/util/BatchingTests.cpp
unittests/util/TestHttpServer.cpp
unittests/util/TestObject.cpp
unittests/util/TestWsServer.cpp
unittests/util/TxUtilTests.cpp
unittests/util/StringUtils.cpp
unittests/util/LedgerUtilsTests.cpp
unittests/util/prometheus/CounterTests.cpp
unittests/util/prometheus/GaugeTests.cpp
unittests/util/prometheus/HistogramTests.cpp
unittests/util/prometheus/HttpTests.cpp
unittests/util/prometheus/LabelTests.cpp
unittests/util/prometheus/MetricBuilderTests.cpp
unittests/util/prometheus/MetricsFamilyTests.cpp
unittests/util/prometheus/OStreamTests.cpp
unittests/util/requests/RequestBuilderTests.cpp
unittests/util/requests/SslContextTests.cpp
unittests/util/requests/WsConnectionTests.cpp
# ETL
unittests/etl/ExtractionDataPipeTests.cpp
unittests/etl/ExtractorTests.cpp
unittests/etl/TransformerTests.cpp
unittests/etl/CacheLoaderTests.cpp
unittests/etl/AmendmentBlockHandlerTests.cpp
unittests/etl/LedgerPublisherTests.cpp
unittests/etl/ETLStateTests.cpp
# RPC
unittests/rpc/ErrorTests.cpp
unittests/rpc/BaseTests.cpp
unittests/rpc/RPCHelpersTest.cpp
unittests/rpc/CountersTest.cpp
unittests/rpc/AdminVerificationTest.cpp
unittests/rpc/RPCHelpersTests.cpp
unittests/rpc/CountersTests.cpp
unittests/rpc/APIVersionTests.cpp
unittests/rpc/ForwardingProxyTests.cpp
unittests/rpc/WorkQueueTests.cpp
unittests/rpc/AmendmentsTests.cpp
unittests/rpc/JsonBoolTests.cpp
## RPC handlers
unittests/rpc/handlers/DefaultProcessorTests.cpp
unittests/rpc/handlers/TestHandlerTests.cpp
unittests/rpc/handlers/AccountCurrenciesTest.cpp
unittests/rpc/handlers/AccountLinesTest.cpp
unittests/rpc/handlers/AccountTxTest.cpp
unittests/rpc/handlers/AccountOffersTest.cpp
unittests/rpc/handlers/AccountInfoTest.cpp
unittests/rpc/handlers/AccountChannelsTest.cpp
unittests/rpc/handlers/AccountNFTsTest.cpp
unittests/rpc/handlers/BookOffersTest.cpp
unittests/rpc/handlers/GatewayBalancesTest.cpp
unittests/rpc/handlers/TxTest.cpp
unittests/rpc/handlers/TransactionEntryTest.cpp
unittests/rpc/handlers/LedgerEntryTest.cpp
unittests/rpc/handlers/LedgerRangeTest.cpp
unittests/rpc/handlers/NoRippleCheckTest.cpp
unittests/rpc/handlers/ServerInfoTest.cpp
unittests/rpc/handlers/PingTest.cpp
unittests/rpc/handlers/RandomTest.cpp
unittests/rpc/handlers/NFTInfoTest.cpp
unittests/rpc/handlers/NFTBuyOffersTest.cpp
unittests/rpc/handlers/NFTSellOffersTest.cpp
unittests/rpc/handlers/NFTHistoryTest.cpp
unittests/rpc/handlers/SubscribeTest.cpp
unittests/rpc/handlers/UnsubscribeTest.cpp
unittests/rpc/handlers/LedgerDataTest.cpp
unittests/rpc/handlers/AccountObjectsTest.cpp
unittests/rpc/handlers/BookChangesTest.cpp
unittests/rpc/handlers/LedgerTest.cpp
unittests/rpc/handlers/AccountCurrenciesTests.cpp
unittests/rpc/handlers/AccountLinesTests.cpp
unittests/rpc/handlers/AccountTxTests.cpp
unittests/rpc/handlers/AccountOffersTests.cpp
unittests/rpc/handlers/AccountInfoTests.cpp
unittests/rpc/handlers/AccountChannelsTests.cpp
unittests/rpc/handlers/AccountNFTsTests.cpp
unittests/rpc/handlers/BookOffersTests.cpp
unittests/rpc/handlers/DepositAuthorizedTests.cpp
unittests/rpc/handlers/GatewayBalancesTests.cpp
unittests/rpc/handlers/TxTests.cpp
unittests/rpc/handlers/TransactionEntryTests.cpp
unittests/rpc/handlers/LedgerEntryTests.cpp
unittests/rpc/handlers/LedgerRangeTests.cpp
unittests/rpc/handlers/NoRippleCheckTests.cpp
unittests/rpc/handlers/ServerInfoTests.cpp
unittests/rpc/handlers/PingTests.cpp
unittests/rpc/handlers/RandomTests.cpp
unittests/rpc/handlers/NFTInfoTests.cpp
unittests/rpc/handlers/NFTBuyOffersTests.cpp
unittests/rpc/handlers/NFTsByIssuerTest.cpp
unittests/rpc/handlers/NFTSellOffersTests.cpp
unittests/rpc/handlers/NFTHistoryTests.cpp
unittests/rpc/handlers/SubscribeTests.cpp
unittests/rpc/handlers/UnsubscribeTests.cpp
unittests/rpc/handlers/LedgerDataTests.cpp
unittests/rpc/handlers/AccountObjectsTests.cpp
unittests/rpc/handlers/BookChangesTests.cpp
unittests/rpc/handlers/LedgerTests.cpp
unittests/rpc/handlers/VersionHandlerTests.cpp
unittests/rpc/handlers/AMMInfoTests.cpp
# Backend
unittests/backend/cassandra/BaseTests.cpp
unittests/backend/cassandra/BackendTests.cpp
unittests/backend/cassandra/RetryPolicyTests.cpp
unittests/backend/cassandra/SettingsProviderTests.cpp
unittests/backend/cassandra/ExecutionStrategyTests.cpp
unittests/backend/cassandra/AsyncExecutorTests.cpp)
include(CMake/deps/gtest.cmake)
unittests/data/BackendFactoryTests.cpp
unittests/data/BackendInterfaceTests.cpp
unittests/data/BackendCountersTests.cpp
unittests/data/cassandra/BaseTests.cpp
unittests/data/cassandra/BackendTests.cpp
unittests/data/cassandra/RetryPolicyTests.cpp
unittests/data/cassandra/SettingsProviderTests.cpp
unittests/data/cassandra/ExecutionStrategyTests.cpp
unittests/data/cassandra/AsyncExecutorTests.cpp
# Webserver
unittests/web/AdminVerificationTests.cpp
unittests/web/ServerTests.cpp
unittests/web/RPCServerHandlerTests.cpp
unittests/web/WhitelistHandlerTests.cpp
unittests/web/SweepHandlerTests.cpp
# Feed
unittests/feed/SubscriptionManagerTests.cpp
unittests/feed/SingleFeedBaseTests.cpp
unittests/feed/ProposedTransactionFeedTests.cpp
unittests/feed/BookChangesFeedTests.cpp
unittests/feed/LedgerFeedTests.cpp
unittests/feed/TransactionFeedTests.cpp
unittests/feed/ForwardFeedTests.cpp
unittests/feed/TrackableSignalTests.cpp)
# test for dwarf5 bug on ci
target_compile_options(clio PUBLIC -gdwarf-4)
include (CMake/deps/gtest.cmake)
# if CODE_COVERAGE enable, add clio_test-ccov
if(CODE_COVERAGE)
include(CMake/coverage.cmake)
add_converage(${TEST_TARGET})
endif()
endif()
# See https://github.com/google/googletest/issues/3475
gtest_discover_tests (clio_tests DISCOVERY_TIMEOUT 10)
include(CMake/install/install.cmake)
if(PACKAGING)
include(CMake/packaging.cmake)
endif()
# Fix for dwarf5 bug on ci
target_compile_options (clio PUBLIC -gdwarf-4)
target_compile_definitions (${TEST_TARGET} PUBLIC UNITTEST_BUILD)
target_include_directories (${TEST_TARGET} PRIVATE unittests)
target_link_libraries (${TEST_TARGET} PUBLIC clio gtest::gtest)
# Generate `coverage_report` target if coverage is enabled
if (coverage)
if (DEFINED CODE_COVERAGE_REPORT_FORMAT)
set(CODE_COVERAGE_FORMAT ${CODE_COVERAGE_REPORT_FORMAT})
else()
set(CODE_COVERAGE_FORMAT html-details)
endif()
if (DEFINED CODE_COVERAGE_TESTS_ARGS)
set(TESTS_ADDITIONAL_ARGS ${CODE_COVERAGE_TESTS_ARGS})
separate_arguments(TESTS_ADDITIONAL_ARGS)
else()
set(TESTS_ADDITIONAL_ARGS "")
endif()
set (GCOVR_ADDITIONAL_ARGS --exclude-throw-branches -s)
setup_target_for_coverage_gcovr(
NAME coverage_report
FORMAT ${CODE_COVERAGE_FORMAT}
EXECUTABLE clio_tests
EXECUTABLE_ARGS --gtest_brief=1 ${TESTS_ADDITIONAL_ARGS}
EXCLUDE "unittests"
DEPENDENCIES clio_tests
)
endif ()
endif ()
# Enable selected sanitizer if enabled via `san`
if (san)
target_compile_options (clio
PUBLIC
# Sanitizers recommend minimum of -O1 for reasonable performance
$<$<CONFIG:Debug>:-O1>
${SAN_FLAG}
-fno-omit-frame-pointer)
target_compile_definitions (clio
PUBLIC
$<$<STREQUAL:${san},address>:SANITIZER=ASAN>
$<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
$<$<STREQUAL:${san},memory>:SANITIZER=MSAN>
$<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>)
target_link_libraries (clio INTERFACE ${SAN_FLAG} ${SAN_LIB})
endif ()
# Generate `docs` target for doxygen documentation if enabled
# Note: use `make docs` to generate the documentation
if (docs)
include (CMake/Docs.cmake)
endif ()
include (CMake/install/install.cmake)
if (packaging)
include (CMake/packaging.cmake) # This file exists only in build runner
endif ()


@@ -62,6 +62,11 @@ git commit --amend -S
git push --force
```
## Use ccache (optional)
Clio uses ccache to speed up compilation. If you want to use it, please make sure it is installed on your machine.
CMake will automatically detect it and use it if it's available.
## Fixing issues found during code review
While your code is in review, it's possible that some changes will be requested by reviewer(s).
This section describes the process of adding your fixes.
@@ -91,7 +96,7 @@ The button for that is near the bottom of the PR's page on GitHub.
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.
## Formatting
Code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain.
Code must conform to `clang-format` version 17, unless the result would be unreasonably difficult to read or maintain.
To change your code to conform use `clang-format -i <your changed files>`.
## Avoid
@@ -126,6 +131,7 @@ Existing maintainers can resign, or be subject to a vote for removal at the behe
* [cindyyan317](https://github.com/cindyyan317) (Ripple)
* [godexsoft](https://github.com/godexsoft) (Ripple)
* [kuznetsss](https://github.com/kuznetsss) (Ripple)
* [legleux](https://github.com/legleux) (Ripple)
## Honorable ex-Maintainers


@@ -1,3 +1,16 @@
PROJECT_NAME = "Clio"
INPUT = src
RECURSIVE = YES
INPUT = ../src ../unittests
EXCLUDE_PATTERNS = *Test*.cpp *Test*.hpp
RECURSIVE = YES
HAVE_DOT = YES
QUIET = YES
WARNINGS = NO
WARN_NO_PARAMDOC = NO
WARN_IF_INCOMPLETE_DOC = NO
WARN_IF_UNDOCUMENTED = NO
GENERATE_LATEX = NO
GENERATE_HTML = YES
SORT_MEMBERS_CTORS_1ST = YES

README.md

@@ -1,50 +1,123 @@
# Clio
Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC. Validated
historical ledger and transaction data are stored in a more space-efficient format,
[![Build status](https://github.com/XRPLF/clio/actions/workflows/build.yml/badge.svg?branch=develop)](https://github.com/XRPLF/clio/actions/workflows/build.yml?query=branch%3Adevelop)
[![Nightly release status](https://github.com/XRPLF/clio/actions/workflows/nightly.yml/badge.svg?branch=develop)](https://github.com/XRPLF/clio/actions/workflows/nightly.yml?query=branch%3Adevelop)
[![Clang-tidy checks status](https://github.com/XRPLF/clio/actions/workflows/clang-tidy.yml/badge.svg?branch=develop)](https://github.com/XRPLF/clio/actions/workflows/clang-tidy.yml?query=branch%3Adevelop)
[![Code coverage develop branch](https://codecov.io/gh/XRPLF/clio/branch/develop/graph/badge.svg?)](https://app.codecov.io/gh/XRPLF/clio)
Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC.
Validated historical ledger and transaction data are stored in a more space-efficient format,
using up to 4 times less space than rippled. Clio can be configured to store data in Apache Cassandra or ScyllaDB,
allowing for scalable read throughput. Multiple Clio nodes can share
access to the same dataset, allowing for a highly available cluster of Clio nodes,
without the need for redundant data storage or computation.
allowing for scalable read throughput. Multiple Clio nodes can share access to the same dataset,
allowing for a highly available cluster of Clio nodes, without the need for redundant data storage or computation.
Clio offers the full rippled API, with the caveat that Clio by default only returns validated data.
This means that `ledger_index` defaults to `validated` instead of `current` for all requests.
Other non-validated data is also not returned, such as information about queued transactions.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client. To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client.
To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
Clio does not connect to the peer-to-peer network. Instead, Clio extracts data from a group of specified rippled nodes. Running Clio requires access to at least one rippled node
from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.
## Help
Feel free to open an [issue](https://github.com/XRPLF/clio/issues) if you have a feature request or something doesn't work as expected.
If you have any questions about building, running, contributing to, or using clio, or anything else, you can always start a new [discussion](https://github.com/XRPLF/clio/discussions).
## Requirements
1. Access to a Cassandra cluster or ScyllaDB cluster. Can be local or remote.
2. Access to one or more rippled nodes. Can be local or remote.
## Building
Clio is built with CMake. Clio requires at least GCC-11/clang-14.0.0 (C++20), and Boost 1.75.0.
Clio is built with CMake and uses Conan for managing dependencies.
It is written in C++20 and therefore requires a modern compiler.
Use these instructions to build a Clio executable from the source. These instructions were tested on Ubuntu 20.04 LTS.
## Prerequisites
### Minimum Requirements
- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55](https://conan.io/downloads.html)
- [CMake 3.16](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html) (needed for code coverage generation)
- [**Optional**] [CCache](https://ccache.dev/) (speeds up compilation if you are going to compile Clio often)
| Compiler | Version |
|-------------|---------|
| GCC | 11 |
| Clang | 14 |
| Apple Clang | 14.0.3 |
### Conan configuration
Clio does not require anything but the default settings in your Conan profile (`~/.conan/profiles/default`). It's best to have no extra flags specified.
> Mac example:
```
[settings]
os=Macos
os_build=Macos
arch=armv8
arch_build=armv8
compiler=apple-clang
compiler.version=14
compiler.libcxx=libc++
build_type=Release
compiler.cppstd=20
```
> Linux example:
```
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=11
compiler.libcxx=libstdc++11
build_type=Release
compiler.cppstd=20
```
### Artifactory
1. Make sure the artifactory is set up with Conan
```sh
# Install dependencies
sudo apt-get -y install git pkg-config protobuf-compiler libprotobuf-dev libssl-dev wget build-essential bison flex autoconf cmake clang-format
# Install gcovr to run code coverage
sudo apt-get -y install gcovr
conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
```
Now you should be able to download the prebuilt `xrpl` package on some platforms.
# Compile Boost
wget -O $HOME/boost_1_75_0.tar.gz https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz
tar xvzf $HOME/boost_1_75_0.tar.gz
cd $HOME/boost_1_75_0
./bootstrap.sh
./b2 -j$(nproc)
echo "export BOOST_ROOT=$HOME/boost_1_75_0" >> $HOME/.profile && source $HOME/.profile
You might need to edit the `~/.conan/remotes.json` file to ensure that this newly added artifactory is listed last. Otherwise you might see compilation errors when building the project with gcc version 13 (or newer).
# Clone the Clio Git repository & build Clio
cd $HOME
git clone https://github.com/XRPLF/clio.git
cd $HOME/clio
cmake -B build && cmake --build build --parallel $(nproc)
2. Remove old packages you may have cached:
```sh
conan remove -f xrpl
```
## Building Clio
Navigate to Clio's root directory and perform
```sh
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=False
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```
If all goes well, `conan install` will find the required packages and `cmake` will do the rest. You should end up with `clio_server` and `clio_tests` in the `build` directory (the current directory).
> **Tip:** You can omit `-o tests=True` from the `conan install` command above if you don't want to build `clio_tests`.
> **Tip:** To generate a Code Coverage report, include `-o coverage=True` in the `conan install` command above, along with `-o tests=True` to enable tests. After running the `cmake` commands, execute `make clio_tests-ccov`. The coverage report will be found at `clio_tests-llvm-cov/index.html`.
## Building Clio with Docker
It is possible to build Clio using Docker if you don't want to install all the dependencies on your machine.
```sh
docker run -it rippleci/clio_ci:latest
git clone https://github.com/XRPLF/clio
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=False
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```
## Running
@@ -96,12 +169,12 @@ The parameters `ssl_cert_file` and `ssl_key_file` can also be added to the top l
An example of how to specify `ssl_cert_file` and `ssl_key_file` in the config:
```json
"server":{
"server": {
"ip": "0.0.0.0",
"port": 51233
},
"ssl_cert_file" : "/full/path/to/cert.file",
"ssl_key_file" : "/full/path/to/key.file"
"ssl_cert_file": "/full/path/to/cert.file",
"ssl_key_file": "/full/path/to/key.file"
```
Once your config files are ready, start rippled and Clio. It doesn't matter which you
@@ -172,6 +245,105 @@ which can cause high latencies. A possible alternative to this is to just deploy
a database in each region, and the Clio nodes in each region use their region's database.
This is effectively two systems.
Clio supports API versioning as [described here](https://xrpl.org/request-formatting.html#api-versioning).
It's possible to configure the `minimum`, `maximum` and `default` versions like so:
```json
"api_version": {
"min": 1,
"max": 2,
"default": 1
}
```
All of the above are optional.
Clio will fall back to hardcoded defaults when these are not specified in the config file, or when the configured values are outside
of the minimum and maximum supported versions hardcoded in `src/rpc/common/APIVersion.h`.
> **Note:** See `example-config.json` for more details.
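As a quick illustration of how this setting is exercised, the sketch below sends a JSON-RPC call that pins `api_version` explicitly; a value outside the configured `min`/`max` range is rejected by Clio. The host/port and the use of Python's `requests` package are assumptions for the example, not part of the config above.
```python
# Minimal sketch: call a Clio instance (assumed to listen on localhost:51233)
# with an explicit api_version. Values outside the configured min/max are rejected.
import json
import requests  # third-party HTTP client, used here purely for illustration

payload = {
    "method": "server_info",
    "params": [{"api_version": 2}],
}
response = requests.post("http://localhost:51233", data=json.dumps(payload))
print(response.json())
```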
## Admin rights for requests
By default, clio checks admin privileges by the IP address of the request (only `127.0.0.1` is considered an admin).
This is not very secure because the IP could be spoofed.
For better security, an `admin_password` can be provided in the `server` section of clio's config:
```json
"server": {
"admin_password": "secret"
}
```
If the password is present in the config, clio will check the Authorization header (if any) of each request for the password.
The Authorization header should contain the type `Password` followed by the password from the config, e.g. `Password secret`.
Only an exactly matching password grants admin rights to the request or websocket connection.
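For illustration, the following sketch adds the Authorization header to a JSON-RPC call. It assumes a local Clio on port 51233 with `admin_password` set to `secret` as in the snippet above, and uses Python's `requests` package; note that the example config and Prometheus setup elsewhere in this changeset send the sha256 hash of the password rather than the plain text, so use whichever form your Clio version expects.
```python
# Minimal sketch: send an admin request to a local Clio instance.
# Assumes admin_password is "secret", as in the config snippet above.
import hashlib
import json
import requests

password = "secret"
# Some setups (see the Prometheus example config in this changeset) send the
# sha256 hex digest of the password instead of the plain text.
hashed = hashlib.sha256(password.encode()).hexdigest()

headers = {"Authorization": f"Password {password}"}  # or f"Password {hashed}"
payload = {"method": "server_info", "params": [{}]}
response = requests.post("http://localhost:51233", data=json.dumps(payload), headers=headers)
print(response.json())
```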
## Prometheus metrics collection
Clio natively supports Prometheus metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.
Prometheus metrics are enabled by default. To disable them, add `"prometheus": { "enabled": false }` to the config.
It is important to know that clio responds to Prometheus requests only if they are admin requests, so Prometheus should be configured to send the admin password in a header.
There is an example docker-compose file, with Prometheus and Grafana configs, in [examples/infrastructure](examples/infrastructure).
## Using clang-tidy for static analysis
The minimum clang-tidy version required is 17.0.
Clang-tidy can be run by cmake while building the project.
To enable this, provide the option `-o lint=True` to the `conan install` command:
```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```
By default cmake will try to find clang-tidy automatically on your system.
To force cmake to use a specific binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the clang-tidy binary.
E.g.:
```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@17/bin/clang-tidy
```
## Coverage report
The coverage report is intended for developers using the GCC
or Clang compilers (including Apple Clang). It is generated by the build target `coverage_report`,
which is only enabled when both the `tests` and `coverage` options are set, e.g. with
`-o coverage=True -o tests=True` in `conan`.
Prerequisites for the coverage report:
- [gcovr tool](https://gcovr.com/en/stable/getting-started.html) (can be installed e.g. with `pip install gcovr`)
- `gcov` for GCC (installed with the compiler by default) or
- `llvm-cov` for Clang (installed with the compiler by default, also on Apple)
- `Debug` build type
The coverage report is created when the following steps are completed, in order:
1. `clio_tests` binary built with the instrumentation data, enabled by the `coverage`
option mentioned above
2. completed run of unit tests, which populates coverage capture data
3. completed run of `gcovr` tool (which internally invokes either `gcov` or `llvm-cov`)
to assemble both instrumentation data and coverage capture data into a coverage report
The above steps are automated into a single target, `coverage_report`. The instrumented
`clio_tests` binary can also be used for running regular unit tests. In case of a
spurious unit test failure, it is possible to re-run the `coverage_report` target without
rebuilding the `clio_tests` binary (since it is simply a dependency of the coverage report target).
The default coverage report format is `html-details`, but developers
can override it to any of the formats listed in `CMake/CodeCoverage.cmake`
by setting the `CODE_COVERAGE_REPORT_FORMAT` variable in `cmake`. For example, CI
sets this parameter to `xml` for the [codecov](codecov.io) integration.
If some unit tests predictably fail, e.g. due to the absence of a Cassandra database, it is possible
to set unit test options in the `CODE_COVERAGE_TESTS_ARGS` cmake variable, as demonstrated below:
```
cd .build
conan install .. --output-folder . --build missing --settings build_type=Debug -o tests=True -o coverage=True
cmake -DCODE_COVERAGE_REPORT_FORMAT=json-details -DCMAKE_BUILD_TYPE=Debug -DCODE_COVERAGE_TESTS_ARGS="--gtest_filter=-BackendCassandra*" -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ..
cmake --build . --target coverage_report
```
After the `coverage_report` target completes, the generated coverage report will be
stored inside the build directory as either:
- a file named `coverage_report.*`, with a suitable extension for the report format, or
- a directory named `coverage_report`, with `index.html` and other files inside, for the `html-details` or `html-nested` report formats.
## Developing against `rippled` in standalone mode
If you wish to develop against a `rippled` instance running in standalone


@@ -1,38 +1,43 @@
/*
* This is an example configuration file. Please do not use without modifying to suit your needs.
*/
{
"database":
{
"type":"cassandra",
"cassandra":
{
"secure_connect_bundle":"[path/to/zip. ignore if using contact_points]",
"contact_points":"[ip. ignore if using secure_connect_bundle]",
"port":"[port. ignore if using_secure_connect_bundle]",
"keyspace":"clio",
"username":"[username, if any]",
"password":"[password, if any]",
"max_requests_outstanding":25000,
"threads":8
"database": {
"type": "cassandra",
"cassandra": {
// This option can be used to setup a secure connect bundle connection
"secure_connect_bundle": "[path/to/zip. ignore if using contact_points]",
// The following options are used only if using contact_points
"contact_points": "[ip. ignore if using secure_connect_bundle]",
"port": "[port. ignore if using_secure_connect_bundle]",
// Authentication settings
"username": "[username, if any]",
"password": "[password, if any]",
// Other common settings
"keyspace": "clio",
"max_write_requests_outstanding": 25000,
"max_read_requests_outstanding": 30000,
"threads": 8
}
},
"etl_sources":
[
"etl_sources": [
{
"ip":"[rippled ip]",
"ws_port":"6006",
"grpc_port":"50051"
"ip": "[rippled ip]",
"ws_port": "6006",
"grpc_port": "50051"
}
],
"dos_guard":
{
"whitelist":["127.0.0.1"]
"dos_guard": {
"whitelist": [
"127.0.0.1"
]
},
"server":{
"ip":"0.0.0.0",
"port":8080
"server": {
"ip": "0.0.0.0",
"port": 8080
},
"log_level":"debug",
"log_file":"./clio.log",
"online_delete":0,
"extractor_threads":8,
"read_only":false
"log_level": "debug",
"log_file": "./clio.log",
"extractor_threads": 8,
"read_only": false
}

conanfile.py Normal file

@@ -0,0 +1,92 @@
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
class Clio(ConanFile):
name = 'clio'
license = 'ISC'
author = 'Alex Kremer <akremer@ripple.com>, John Freeman <jfreeman@ripple.com>'
url = 'https://github.com/xrplf/clio'
description = 'Clio RPC server'
settings = 'os', 'compiler', 'build_type', 'arch'
options = {
'fPIC': [True, False],
'verbose': [True, False],
'tests': [True, False], # build unit tests; create `clio_tests` binary
'docs': [True, False], # doxygen API docs; create custom target 'docs'
'packaging': [True, False], # create distribution packages
'coverage': [True, False], # build for test coverage report; create custom target `clio_tests-ccov`
'lint': [True, False], # run clang-tidy checks during compilation
}
requires = [
'boost/1.82.0',
'cassandra-cpp-driver/2.17.0',
'fmt/10.1.1',
'protobuf/3.21.9',
'grpc/1.50.1',
'openssl/1.1.1u',
'xrpl/2.2.0',
'libbacktrace/cci.20210118'
]
default_options = {
'fPIC': True,
'verbose': False,
'tests': False,
'packaging': False,
'coverage': False,
'lint': False,
'docs': False,
'xrpl/*:tests': False,
'cassandra-cpp-driver/*:shared': False,
'date/*:header_only': True,
'grpc/*:shared': False,
'grpc/*:secure': True,
'libpq/*:shared': False,
'lz4/*:shared': False,
'openssl/*:shared': False,
'protobuf/*:shared': False,
'protobuf/*:with_zlib': True,
'snappy/*:shared': False,
'gtest/*:no_main': True,
}
exports_sources = (
'CMakeLists.txt', 'CMake/*', 'src/*'
)
def requirements(self):
if self.options.tests:
self.requires('gtest/1.14.0')
def configure(self):
if self.settings.compiler == 'apple-clang':
self.options['boost'].visibility = 'global'
def layout(self):
cmake_layout(self)
# Fix this setting to follow the default introduced in Conan 1.48
# to align with our build instructions.
self.folders.generators = 'build/generators'
generators = 'CMakeDeps'
def generate(self):
tc = CMakeToolchain(self)
tc.variables['verbose'] = self.options.verbose
tc.variables['tests'] = self.options.tests
tc.variables['coverage'] = self.options.coverage
tc.variables['lint'] = self.options.lint
tc.variables['docs'] = self.options.docs
tc.variables['packaging'] = self.options.packaging
tc.generate()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
cmake = CMake(self)
cmake.install()

docker/ci/dockerfile Normal file

@@ -0,0 +1,67 @@
FROM ubuntu:focal
ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH
USER root
WORKDIR /root/
ENV GCC_VERSION=11 \
CCACHE_VERSION=4.8.3 \
LLVM_TOOLS_VERSION=17 \
GH_VERSION=2.40.0
# Add repositories
RUN apt-get -qq update \
&& apt-get -qq install -y --no-install-recommends --no-install-suggests gnupg wget curl software-properties-common \
&& add-apt-repository -y ppa:ubuntu-toolchain-r/test \
&& wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | apt-key add - \
&& apt-add-repository 'deb https://apt.kitware.com/ubuntu/ focal main' \
&& echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-${LLVM_TOOLS_VERSION} main" >> /etc/apt/sources.list \
&& wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
# Install packages
RUN apt update -qq \
&& apt install -y --no-install-recommends --no-install-suggests cmake python3 python3-pip sudo git \
ninja-build make pkg-config libzstd-dev libzstd1 g++-${GCC_VERSION} jq \
clang-format-${LLVM_TOOLS_VERSION} clang-tidy-${LLVM_TOOLS_VERSION} clang-tools-${LLVM_TOOLS_VERSION} \
&& update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_VERSION} 100 \
&& apt-get clean && apt remove -y software-properties-common \
&& pip3 install -q --upgrade --no-cache-dir pip \
&& pip3 install -q --no-cache-dir conan==1.62 gcovr
# Install ccache from source
WORKDIR /tmp
RUN wget "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \
&& tar xf "ccache-${CCACHE_VERSION}.tar.gz" \
&& cd "ccache-${CCACHE_VERSION}" \
&& mkdir build && cd build \
&& cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
&& ninja && cp ./ccache /usr/bin/ccache
# Install gh
RUN wget https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
&& tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
&& mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/bin/gh
# Clean up
RUN rm -rf /tmp/* /var/tmp/*
WORKDIR /root/
# Using root by default is not very secure but github checkout action doesn't work with any other user
# https://github.com/actions/checkout/issues/956
# And Github Actions doc recommends using root
# https://docs.github.com/en/actions/creating-actions/dockerfile-support-for-github-actions#user
# Setup conan
RUN conan profile new default --detect \
&& conan profile update settings.compiler.cppstd=20 default \
&& conan profile update settings.compiler.libcxx=libstdc++11 default \
&& conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod


@@ -1,3 +1,6 @@
/*
* This is an example configuration file. Please do not use without modifying to suit your needs.
*/
{
"database": {
"type": "cassandra",
@@ -9,9 +12,22 @@
"table_prefix": "",
"max_write_requests_outstanding": 25000,
"max_read_requests_outstanding": 30000,
"threads": 8
"threads": 8,
//
// Advanced options. USE AT OWN RISK:
// ---
"core_connections_per_host": 1, // Defaults to 1
"write_batch_size": 20 // Defaults to 20
//
// Below options will use defaults from cassandra driver if left unspecified.
// See https://docs.datastax.com/en/developer/cpp-driver/2.17/api/struct.CassCluster/ for details.
//
// "queue_size_io": 2
//
// ---
}
},
"allow_no_etl": false, // Allow Clio to run without valid ETL source, otherwise Clio will stop if ETL check fails
"etl_sources": [
{
"ip": "127.0.0.1",
@@ -20,21 +36,24 @@
}
],
"dos_guard": {
// Comma-separated list of IPs to exclude from rate limiting
"whitelist": [
"127.0.0.1"
], // comma-separated list of ips to exclude from rate limiting
/* The below values are the default values and are only specified here
* for documentation purposes. The rate limiter currently limits
* connections and bandwidth per ip. The rate limiter looks at the raw
* ip of a client connection, and so requests routed through a load
* balancer will all have the same ip and be treated as a single client
*/
"max_fetches": 1000000, // max bytes per ip per sweep interval
"max_connections": 20, // max connections per ip
"max_requests": 20, // max connections per ip
"sweep_interval": 1 // time in seconds before resetting bytes per ip count
],
//
// The below values are the default values and are only specified here
// for documentation purposes. The rate limiter currently limits
// connections and bandwidth per IP. The rate limiter looks at the raw
// IP of a client connection, and so requests routed through a load
// balancer will all have the same IP and be treated as a single client.
//
"max_fetches": 1000000, // Max bytes per IP per sweep interval
"max_connections": 20, // Max connections per IP
"max_requests": 20, // Max connections per IP per sweep interval
"sweep_interval": 1 // Time in seconds before resetting max_fetches and max_requests
},
"cache": {
// Comma-separated list of peer nodes that Clio can use to download cache from at startup
"peers": [
{
"ip": "127.0.0.1",
@@ -45,11 +64,18 @@
"server": {
"ip": "0.0.0.0",
"port": 51233,
/* Max number of requests to queue up before rejecting further requests.
* Defaults to 0, which disables the limit
*/
"max_queue_size": 500
// Max number of requests to queue up before rejecting further requests.
// Defaults to 0, which disables the limit.
"max_queue_size": 500,
// If the request contains an Authorization header, Clio checks whether it matches 'Password ' + the sha256 hash of this value
// If it matches, the request is considered an admin request
"admin_password": "xrp",
// If local_admin is true, Clio will consider requests coming from 127.0.0.1 as admin requests
// It's true by default unless admin_password is set; 'local_admin': true and 'admin_password' cannot be set at the same time
"local_admin": false
},
// Overrides log level on a per logging channel.
// Defaults to global "log_level" for each unspecified channel.
"log_channels": [
{
"channel": "Backend",
@@ -76,18 +102,29 @@
"log_level": "trace"
}
],
"prometheus": {
"enabled": true,
"compress_reply": true
},
"log_level": "info",
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%", // This is the default format
// Log format (this is the default format)
"log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%",
"log_to_console": true,
"log_directory": "./clio_log",
// Clio logs to file in the specified directory only if "log_directory" is set
// "log_directory": "./clio_log",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
"log_tag_style": "uint",
"extractor_threads": 8,
"read_only": false,
//"start_sequence": [integer] the ledger index to start from,
//"finish_sequence": [integer] the ledger index to finish at,
//"ssl_cert_file" : "/full/path/to/cert.file",
//"ssl_key_file" : "/full/path/to/key.file"
// "start_sequence": [integer] the ledger index to start from,
// "finish_sequence": [integer] the ledger index to finish at,
// "ssl_cert_file" : "/full/path/to/cert.file",
// "ssl_key_file" : "/full/path/to/key.file"
"api_version": {
"min": 1, // Minimum API version supported (could be 1 or 2)
"max": 2, // Maximum API version supported (could be 1 or 2, but >= min)
"default": 1 // Clio behaves the same as rippled by default
}
}


@@ -0,0 +1,25 @@
# Example of clio monitoring infrastructure
This directory contains an example of docker-based infrastructure to collect and visualise metrics from clio.
The structure of the directory:
- `compose.yaml`
Docker-compose file with Prometheus and Grafana set up.
- `prometheus.yaml`
Defines metrics collection from Clio and Prometheus itself.
Demonstrates how to set up a Clio target and Clio's admin authorisation in Prometheus.
- `grafana/clio_dashboard.json`
JSON file containing a preconfigured dashboard in Grafana format.
- `grafana/dashboard_local.yaml`
Grafana configuration file defining the directory to search for dashboard JSON files.
- `grafana/datasources.yaml`
Grafana configuration file defining Prometheus as a data source for Grafana.
## How to try
1. Make sure you have `docker` and `docker-compose` installed.
2. Run `docker-compose up -d` from this directory. It will start docker containers with Prometheus and Grafana.
3. Open [http://localhost:3000/dashboards](http://localhost:3000/dashboards). The Grafana login is `admin`, password `grafana`.
There will be a preconfigured Clio dashboard.
If Clio is not running yet, launch it to see metrics. Some of the metrics may appear only after requests to Clio.
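To quickly check that metrics are reachable before wiring up Prometheus, something like the sketch below can be used. It assumes Clio listens on localhost:51233, that the admin password is `xrp` as in the example configs, and that the default Prometheus metrics path (`/metrics`) applies, since `prometheus.yaml` in this directory does not override it.
```python
# Minimal sketch: fetch Clio's Prometheus metrics as an admin request.
# Assumes Clio on localhost:51233 and admin_password "xrp" (see example configs).
import hashlib
import requests

credentials = hashlib.sha256(b"xrp").hexdigest()
headers = {"Authorization": f"Password {credentials}"}
response = requests.get("http://localhost:51233/metrics", headers=headers)
print(response.status_code)
print(response.text[:500])  # first few metric lines
```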


@@ -0,0 +1,20 @@
services:
prometheus:
image: prom/prometheus
ports:
- 9090:9090
volumes:
- ./prometheus.yaml:/etc/prometheus/prometheus.yml
command:
- '--config.file=/etc/prometheus/prometheus.yml'
grafana:
image: grafana/grafana
ports:
- 3000:3000
environment:
- GF_SECURITY_ADMIN_USER=admin
- GF_SECURITY_ADMIN_PASSWORD=grafana
volumes:
- ./grafana/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
- ./grafana/dashboard_local.yaml:/etc/grafana/provisioning/dashboards/local.yaml
- ./grafana/clio_dashboard.json:/var/lib/grafana/dashboards/clio_dashboard.json

File diff suppressed because it is too large


@@ -0,0 +1,23 @@
apiVersion: 1
providers:
- name: 'Clio dashboard'
# <int> Org id. Default to 1
orgId: 1
# <string> name of the dashboard folder.
folder: ''
# <string> folder UID. will be automatically generated if not specified
folderUid: ''
# <string> provider type. Default to 'file'
type: file
# <bool> disable dashboard deletion
disableDeletion: false
# <int> how often Grafana will scan for changed dashboards
updateIntervalSeconds: 10
# <bool> allow updating provisioned dashboards from the UI
allowUiUpdates: false
options:
# <string, required> path to dashboard files on disk. Required when using the 'file' type
path: /var/lib/grafana/dashboards
# <bool> use folder names from filesystem to create folders in Grafana
foldersFromFilesStructure: true


@@ -0,0 +1,8 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
url: http://prometheus:9090
isDefault: true
access: proxy


@@ -0,0 +1,19 @@
scrape_configs:
- job_name: clio
scrape_interval: 5s
scrape_timeout: 5s
authorization:
type: Password
# sha256sum from password `xrp`
# use echo -n 'your_password' | shasum -a 256 to get hash
credentials: 0e1dcf1ff020cceabf8f4a60a32e814b5b46ee0bb8cd4af5c814e4071bd86a18
static_configs:
- targets:
- host.docker.internal:51233
- job_name: prometheus
honor_timestamps: true
scrape_interval: 15s
scrape_timeout: 10s
static_configs:
- targets:
- localhost:9090
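The `credentials` value in the `clio` job above is simply the sha256 hex digest of Clio's `admin_password` (`xrp` in the example configs). If `shasum` is not available, the same value can be produced with a few lines of Python; this is only a sketch offered as an alternative to the shell command mentioned in the comment.
```python
# Compute the sha256 hex digest used as the "credentials" value above.
# "xrp" is the admin_password used in the example configs of this changeset.
import hashlib

print(hashlib.sha256(b"xrp").hexdigest())
# expected (per the comment above): 0e1dcf1ff020cceabf8f4a60a32e814b5b46ee0bb8cd4af5c814e4071bd86a18
```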


@@ -1,183 +0,0 @@
#!/usr/bin/python3
import argparse
from datetime import datetime
def getTime(line):
bracketOpen = line.find("[")
bracketClose = line.find("]")
timestampSub = line[bracketOpen+1:bracketClose]
timestamp = datetime.strptime(timestampSub, '%Y-%m-%d %H:%M:%S.%f')
return timestamp.timestamp()
def parseAccountTx(filename):
with open(filename) as f:
totalProcTime = 0.0
totalTxnTime = 0.0
numCalls = 0
for line in f:
if "executed stored_procedure" in line:
idx = line.find("in ")
idx = idx + 3
idx2 = line.find("num")
procTime = float(line[idx:idx2])
totalProcTime += procTime
if "fetchTransactions fetched" in line:
idx = line.find("took ")
idx = idx + 5
txnTime = float(line[idx:])
totalTxnTime += txnTime
numCalls = numCalls + 1
print(totalProcTime)
print(totalProcTime/numCalls)
print(totalTxnTime)
print(totalTxnTime/numCalls)
def parseLogs(filename, interval, minTxnCount = 0):
with open(filename) as f:
totalTime = 0
totalTxns = 0
totalObjs = 0
totalLoadTime = 0
start = 0
end = 0
totalLedgers = 0
intervalTime = 0
intervalTxns = 0
intervalObjs = 0
intervalLoadTime = 0
intervalStart = 0
intervalEnd = 0
intervalLedgers = 0
ledgersPerSecond = 0
print("ledgers, transactions, objects, loadTime, loadTime/ledger, ledgers/sec, txns/sec, objs/sec")
for line in f:
if "Load phase" in line:
sequenceIdx = line.find("Sequence : ")
hashIdx = line.find(" Hash :")
sequence = line[sequenceIdx + len("Sequence : "):hashIdx]
txnCountSubstr = "txn count = "
objCountSubstr = ". object count = "
loadTimeSubstr = ". load time = "
txnsSubstr = ". load txns per second = "
objsSubstr = ". load objs per second = "
txnCountIdx = line.find(txnCountSubstr)
objCountIdx = line.find(objCountSubstr)
loadTimeIdx = line.find(loadTimeSubstr)
txnsIdx = line.find(txnsSubstr)
objsIdx = line.find(objsSubstr)
txnCount = line[txnCountIdx + len(txnCountSubstr):objCountIdx]
objCount = line[objCountIdx + len(objCountSubstr):loadTimeIdx]
loadTime = line[loadTimeIdx + len(loadTimeSubstr):txnsIdx]
txnsPerSecond = line[txnsIdx + len(txnsSubstr):objsIdx]
objsPerSecond = line[objsIdx + len(objsSubstr):-1]
if int(txnCount) >= minTxnCount:
totalTime += float(loadTime);
totalTxns += float(txnCount)
totalObjs += float(objCount)
intervalTime += float(loadTime)
intervalTxns += float(txnCount)
intervalObjs += float(objCount)
totalLoadTime += float(loadTime)
intervalLoadTime += float(loadTime)
if start == 0:
start = getTime(line)
prevEnd = end
end = getTime(line)
if intervalStart == 0:
intervalStart = getTime(line)
intervalEnd = getTime(line)
totalLedgers+=1
intervalLedgers+=1
ledgersPerSecond = 0
if end != start:
ledgersPerSecond = float(totalLedgers) / float((end - start))
intervalLedgersPerSecond = 0
if intervalEnd != intervalStart:
intervalLedgersPerSecond = float(intervalLedgers) / float((intervalEnd - intervalStart))
if int(sequence) % interval == 0:
# print("Sequence = " + sequence + " : [time, txCount, objCount, txPerSec, objsPerSec]")
# print(loadTime + " , "
# + txnCount + " , "
# + objCount + " , "
# + txnsPerSecond + " , "
# + objsPerSecond)
# print("Interval Aggregate ( " + str(interval) + " ) [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]: ")
print(str(intervalLedgers) + " , "
+ str(intervalTxns) + " , "
+ str(intervalObjs) + " , "
+ str(intervalLoadTime) + " , "
+ str(intervalLoadTime/intervalLedgers) + " , "
+ str(intervalLedgers/intervalLoadTime) + " , "
+ str(intervalTxns/intervalLoadTime) + " , "
+ str(intervalObjs/intervalLoadTime))
# print("Total Aggregate: [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]")
# print(str(totalLedgers) + " , "
# + str(totalTxns) + " , "
# + str(totalObjs) + " , "
# + str(end-start) + " , "
# + str(ledgersPerSecond) + " , "
# + str(totalLoadTime/totalLedgers) + " , "
# + str(totalTxns/totalTime) + " , "
# + str(totalObjs/totalTime))
if int(sequence) % interval == 0:
intervalTime = 0
intervalTxns = 0
intervalObjs = 0
intervalStart = 0
intervalEnd = 0
intervalLedgers = 0
intervalLoadTime = 0
print("Total Aggregate: [ledgers, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]")
print(totalLedgers)
print(totalLoadTime)
print(str(totalLedgers) + " : "
+ str(end-start) + " : "
+ str(ledgersPerSecond) + " : "
+ str(totalLoadTime/totalLedgers) + " : "
+ str(totalTxns/totalTime) + " : "
+ str(totalObjs/totalTime))
parser = argparse.ArgumentParser(description='parses logs')
parser.add_argument("--filename")
parser.add_argument("--interval",default=100000)
parser.add_argument("--minTxnCount",default=0)
parser.add_argument("--account_tx",default=False)
args = parser.parse_args()
def run(args):
if args.account_tx:
parseAccountTx(args.filename)
else:
parseLogs(args.filename, int(args.interval))
run(args)


@@ -1,322 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <backend/BackendInterface.h>
#include <log/Logger.h>
using namespace clio;
// local to compilation unit loggers
namespace {
clio::Logger gLog{"Backend"};
} // namespace
namespace Backend {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
gLog.debug() << "Want finish writes for " << ledgerSequence;
auto commitRes = doFinishWrites();
if (commitRes)
{
gLog.debug() << "Successfully commited. Updating range now to " << ledgerSequence;
updateRange(ledgerSequence);
}
return commitRes;
}
void
BackendInterface::writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob)
{
assert(key.size() == sizeof(ripple::uint256));
doWriteLedgerObject(std::move(key), seq, std::move(blob));
}
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const
{
gLog.trace() << "called";
while (true)
{
try
{
return hardFetchLedgerRange(yield);
}
catch (DatabaseTimeout& t)
{
;
}
}
}
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
gLog.trace() << "called";
return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}
// *** state data methods
std::optional<Blob>
BackendInterface::fetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
auto obj = cache_.get(key, sequence);
if (obj)
{
gLog.trace() << "Cache hit - " << ripple::strHex(key);
return *obj;
}
else
{
gLog.trace() << "Cache miss - " << ripple::strHex(key);
auto dbObj = doFetchLedgerObject(key, sequence, yield);
if (!dbObj)
gLog.trace() << "Missed cache and missed in db";
else
gLog.trace() << "Missed cache but found in db";
return dbObj;
}
}
std::vector<Blob>
BackendInterface::fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const
{
std::vector<Blob> results;
results.resize(keys.size());
std::vector<ripple::uint256> misses;
for (size_t i = 0; i < keys.size(); ++i)
{
auto obj = cache_.get(keys[i], sequence);
if (obj)
results[i] = *obj;
else
misses.push_back(keys[i]);
}
gLog.trace() << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
if (misses.size())
{
auto objs = doFetchLedgerObjects(misses, sequence, yield);
for (size_t i = 0, j = 0; i < results.size(); ++i)
{
if (results[i].size() == 0)
{
results[i] = objs[j];
++j;
}
}
}
return results;
}
// Fetches the successor to key/index
std::optional<ripple::uint256>
BackendInterface::fetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
auto succ = cache_.getSuccessor(key, ledgerSequence);
if (succ)
gLog.trace() << "Cache hit - " << ripple::strHex(key);
else
gLog.trace() << "Cache miss - " << ripple::strHex(key);
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}
std::optional<LedgerObject>
BackendInterface::fetchSuccessorObject(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context& yield) const
{
auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
if (succ)
{
auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
if (!obj)
return {{*succ, {}}};
return {{*succ, *obj}};
}
return {};
}
BookOffersPage
BackendInterface::fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
boost::asio::yield_context& yield) const
{
// TODO try to speed this up. This can take a few seconds. The goal is
// to get it down to a few hundred milliseconds.
BookOffersPage page;
const ripple::uint256 bookEnd = ripple::getQualityNext(book);
ripple::uint256 uTipIndex = book;
std::vector<ripple::uint256> keys;
auto getMillis = [](auto diff) { return std::chrono::duration_cast<std::chrono::milliseconds>(diff).count(); };
auto begin = std::chrono::system_clock::now();
std::uint32_t numSucc = 0;
std::uint32_t numPages = 0;
long succMillis = 0;
long pageMillis = 0;
while (keys.size() < limit)
{
auto mid1 = std::chrono::system_clock::now();
auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield);
auto mid2 = std::chrono::system_clock::now();
numSucc++;
succMillis += getMillis(mid2 - mid1);
if (!offerDir || offerDir->key >= bookEnd)
{
gLog.trace() << "offerDir.has_value() " << offerDir.has_value() << " breaking";
break;
}
uTipIndex = offerDir->key;
while (keys.size() < limit)
{
++numPages;
ripple::STLedgerEntry sle{ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key};
auto indexes = sle.getFieldV256(ripple::sfIndexes);
keys.insert(keys.end(), indexes.begin(), indexes.end());
auto next = sle.getFieldU64(ripple::sfIndexNext);
if (!next)
{
gLog.trace() << "Next is empty. breaking";
break;
}
auto nextKey = ripple::keylet::page(uTipIndex, next);
auto nextDir = fetchLedgerObject(nextKey.key, ledgerSequence, yield);
assert(nextDir);
offerDir->blob = *nextDir;
offerDir->key = nextKey.key;
}
auto mid3 = std::chrono::system_clock::now();
pageMillis += getMillis(mid3 - mid2);
}
auto mid = std::chrono::system_clock::now();
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < keys.size() && i < limit; ++i)
{
gLog.trace() << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
<< " ledgerSequence = " << ledgerSequence;
assert(objs[i].size());
page.offers.push_back({keys[i], objs[i]});
}
auto end = std::chrono::system_clock::now();
gLog.debug() << "Fetching " << std::to_string(keys.size()) << " offers took "
<< std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
<< std::to_string(succMillis) << " milliseonds. Fetched next dir " << std::to_string(numSucc)
<< " times"
<< " Fetching next page of dir took " << std::to_string(pageMillis) << " milliseconds"
<< ". num pages = " << std::to_string(numPages) << ". Fetching all objects took "
<< std::to_string(getMillis(end - mid))
<< " milliseconds. total time = " << std::to_string(getMillis(end - begin)) << " milliseconds"
<< " book = " << ripple::strHex(book);
return page;
}
LedgerPage
BackendInterface::fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
bool outOfOrder,
boost::asio::yield_context& yield) const
{
LedgerPage page;
std::vector<ripple::uint256> keys;
bool reachedEnd = false;
while (keys.size() < limit && !reachedEnd)
{
ripple::uint256 const& curCursor = keys.size() ? keys.back() : cursor ? *cursor : firstKey;
std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
auto succ = fetchSuccessorKey(curCursor, seq, yield);
if (!succ)
reachedEnd = true;
else
keys.push_back(std::move(*succ));
}
auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < objects.size(); ++i)
{
if (objects[i].size())
page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
else if (!outOfOrder)
{
gLog.error() << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
<< " - seq = " << ledgerSequence;
std::stringstream msg;
for (size_t j = 0; j < objects.size(); ++j)
{
msg << " - " << ripple::strHex(keys[j]);
}
gLog.error() << msg.str();
}
}
if (keys.size() && !reachedEnd)
page.cursor = keys.back();
return page;
}
std::optional<ripple::Fees>
BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const
{
ripple::Fees fees;
auto key = ripple::keylet::fees().key;
auto bytes = fetchLedgerObject(key, seq, yield);
if (!bytes)
{
gLog.error() << "Could not find fees";
return {};
}
ripple::SerialIter it(bytes->data(), bytes->size());
ripple::SLE sle{it, key};
if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
fees.base = sle.getFieldU64(ripple::sfBaseFee);
if (sle.getFieldIndex(ripple::sfReferenceFeeUnits) != -1)
fees.units = sle.getFieldU32(ripple::sfReferenceFeeUnits);
if (sle.getFieldIndex(ripple::sfReserveBase) != -1)
fees.reserve = sle.getFieldU32(ripple::sfReserveBase);
if (sle.getFieldIndex(ripple::sfReserveIncrement) != -1)
fees.increment = sle.getFieldU32(ripple::sfReserveIncrement);
return fees;
}
} // namespace Backend


@@ -1,613 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <ripple/ledger/ReadView.h>
#include <backend/DBHelpers.h>
#include <backend/SimpleCache.h>
#include <backend/Types.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <thread>
#include <type_traits>
namespace Backend {
/**
* @brief Throws an error when database read time limit is exceeded.
*
* This class throws an error when the read time limit is exceeded, and
* is paired with a separate class that retries the connection.
*/
class DatabaseTimeout : public std::exception
{
public:
const char*
what() const throw() override
{
return "Database read timed out. Please retry the request";
}
};
/**
* @brief Separate class that reattempts connection after time limit.
*
* @tparam F Represents a class of handlers for Cassandra database.
* @param func Instance of Cassandra database handler class.
* @param waitMs The wait time between retries, 500ms by default.
* @return auto
*/
template <class F>
auto
retryOnTimeout(F func, size_t waitMs = 500)
{
static clio::Logger log{"Backend"};
while (true)
{
try
{
return func();
}
catch (DatabaseTimeout& t)
{
log.error() << "Database request timed out. Sleeping and retrying ... ";
std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
}
}
}
/**
* @brief Passes in serialized handlers in an asynchronous fashion.
*
* Note that `synchronous` blocks the calling thread until the passed-in handler
* completes. The handler's return type determines whether a result is propagated back.
*
* @tparam F Represents a class of handlers for Cassandra database.
* @param f R-value instance of Cassandra handler class.
* @return auto
*/
template <class F>
auto
synchronous(F&& f)
{
/** @brief Serialized handlers and their execution.
*
* The ctx class is converted into a serialized handler, also named
* ctx, and is used to pass a stream of data into the method.
*/
boost::asio::io_context ctx;
boost::asio::io_context::strand strand(ctx);
std::optional<boost::asio::io_context::work> work;
/*! @brief Place the ctx within the vector of serialized handlers. */
work.emplace(ctx);
/**
* @brief If/else statements regarding coroutine type matching.
*
* R is the return type of the handler when invoked with a yield_context.
* If R is not void, the result is captured and returned to the caller.
*/
using R = typename boost::result_of<F(boost::asio::yield_context&)>::type;
if constexpr (!std::is_same<R, void>::value)
{
/**
* @brief When the coroutine type is the same
*
* The spawn function enables programs to implement asynchronous logic
* in a synchronous manner. res stores the instance of the currently
* executing coroutine, yield. The different type is returned.
*/
R res;
boost::asio::spawn(strand, [&f, &work, &res](boost::asio::yield_context yield) {
res = f(yield);
work.reset();
});
ctx.run();
return res;
}
else
{
/*! @brief When the coroutine's return type is void, run as normal. */
boost::asio::spawn(strand, [&f, &work](boost::asio::yield_context yield) {
f(yield);
work.reset();
});
ctx.run();
}
}
/**
* @brief Reestablishes synchronous connection on timeout.
*
* @tparam Represents a class of handlers for Cassandra database.
* @param f R-value instance of Cassandra database handler class.
* @return auto
*/
template <class F>
auto
synchronousAndRetryOnTimeout(F&& f)
{
return retryOnTimeout([&]() { return synchronous(f); });
}
/*! @brief Handles ledger and transaction backend data. */
class BackendInterface
{
/**
* @brief Shared mutexes and a cache for the interface.
*
* rngMutex is a shared mutex. Shared mutexes prevent shared data
* from being accessed by multiple threads and has two levels of
* access: shared and exclusive.
*/
protected:
mutable std::shared_mutex rngMtx_;
std::optional<LedgerRange> range;
SimpleCache cache_;
/**
* @brief Public read methods
*
* All of these read methods can throw DatabaseTimeout. When writing
* code in an RPC handler, this exception does not need to be caught:
* when an RPC results in a timeout, an error is returned to the client.
*/
public:
BackendInterface() = default;
virtual ~BackendInterface() = default;
/*! @brief LEDGER METHODS */
public:
/**
* @brief Cache that holds states of the ledger
*
* const version holds the original cache state; the other tracks
* historical changes.
*
* @return SimpleCache const&
*/
SimpleCache const&
cache() const
{
return cache_;
}
SimpleCache&
cache()
{
return cache_;
}
/*! @brief Fetches a specific ledger by sequence number. */
virtual std::optional<ripple::LedgerInfo>
fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context& yield) const = 0;
/*! @brief Fetches a specific ledger by hash. */
virtual std::optional<ripple::LedgerInfo>
fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0;
/*! @brief Fetches the latest ledger sequence. */
virtual std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context& yield) const = 0;
/*! @brief Fetches the current ledger range while locking that process */
std::optional<LedgerRange>
fetchLedgerRange() const
{
std::shared_lock lck(rngMtx_);
return range;
}
/**
* @brief Updates the range of sequences to be tracked.
*
* Function that continues updating the range sliding window or creates
* a new sliding window once the maxSequence limit has been reached.
*
* @param newMax Unsigned 32-bit integer representing new max of range.
*/
void
updateRange(uint32_t newMax)
{
std::scoped_lock lck(rngMtx_);
assert(!range || newMax >= range->maxSequence);
if (!range)
range = {newMax, newMax};
else
range->maxSequence = newMax;
}
/**
* @brief Returns the fees for specific transactions.
*
* @param seq Unsigned 32-bit integer representing the sequence.
* @param yield The currently executing coroutine.
* @return std::optional<ripple::Fees>
*/
std::optional<ripple::Fees>
fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const;
/*! @brief TRANSACTION METHODS */
/**
* @brief Fetches a specific transaction.
*
* @param hash Unsigned 256-bit integer representing hash.
* @param yield The currently executing coroutine.
* @return std::optional<TransactionAndMetadata>
*/
virtual std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches multiple transactions.
*
* @param hashes Vector of 256-bit transaction hashes.
* @param yield The currently executing coroutine.
* @return std::vector<TransactionAndMetadata>
*/
virtual std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches all transactions for a specific account
*
* @param account A specific XRPL account, specified by the unique AccountID type.
* @param limit Paging limit for how many transactions can be returned per
* page.
* @param forward Boolean whether paging happens forwards or backwards.
* @param cursor Important metadata returned every time paging occurs.
* @param yield Currently executing coroutine.
* @return TransactionsAndCursor
*/
virtual TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t const limit,
bool forward,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches all transactions from a specific ledger.
*
* @param ledgerSequence Unsigned 32-bit integer representing the ledger
* sequence to fetch transactions from.
* @param yield Currently executing coroutine.
* @return std::vector<TransactionAndMetadata>
*/
virtual std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches all transaction hashes from a specific ledger.
*
* @param ledgerSequence Standard unsigned integer.
* @param yield Currently executing coroutine.
* @return std::vector<ripple::uint256>
*/
virtual std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;
/*! @brief NFT methods */
/**
* @brief Fetches a specific NFT
*
* @param tokenID Unsigned 256-bit integer.
* @param ledgerSequence Standard unsigned integer.
* @param yield Currently executing coroutine.
* @return std::optional<NFT>
*/
virtual std::optional<NFT>
fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
const = 0;
/**
* @brief Fetches all transactions for a specific NFT.
*
* @param tokenID Unsigned 256-bit integer.
* @param limit Paging limit as to how many transactions are returned per page.
* @param forward Boolean whether paging happens forwards or backwards.
* @param cursorIn Represents transaction number and ledger sequence.
* @param yield Currently executing coroutine is passed in as input.
* @return TransactionsAndCursor
*/
virtual TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t const limit,
bool const forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context& yield) const = 0;
/*! @brief STATE DATA METHODS */
/**
* @brief Fetches a specific ledger object: vector of unsigned chars
*
* @param key Unsigned 256-bit integer.
* @param sequence Unsigned 32-bit integer.
* @param yield Currently executing coroutine.
* @return std::optional<Blob>
*/
std::optional<Blob>
fetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield)
const;
/**
* @brief Fetches multiple ledger objects: a vector of vectors of unsigned chars (blobs).
*
* @param keys Vector of 256-bit object keys.
* @param sequence Unsigned 32-bit integer.
* @param yield Currently executing coroutine.
* @return std::vector<Blob>
*/
std::vector<Blob>
fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const;
/*! @brief Virtual function version of fetchLedgerObject */
virtual std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context& yield)
const = 0;
/*! @brief Virtual function version of fetchLedgerObjects */
virtual std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context& yield) const = 0;
/**
* @brief Returns the difference between two consecutive ledgers as a vector of objects.
*
* Each object consists of a key and a vector of unsigned chars (blob)
* holding the object's state in the newer ledger.
*
* @param ledgerSequence Standard unsigned integer.
* @param yield Currently executing coroutine.
* @return std::vector<LedgerObject>
*/
virtual std::vector<LedgerObject>
fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const = 0;
/**
* @brief Fetches a page of ledger objects, ordered by key/index.
*
* @param cursor Important metadata returned every time paging occurs.
* @param ledgerSequence Standard unsigned integer.
* @param limit Paging limit as to how many ledger objects are returned per page.
* @param outOfOrder Boolean on whether ledger page is out of order.
* @param yield Currently executing coroutine.
* @return LedgerPage
*/
LedgerPage
fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
bool outOfOrder,
boost::asio::yield_context& yield) const;
/*! @brief Fetches successor object from key/index. */
std::optional<LedgerObject>
fetchSuccessorObject(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
const;
/*! @brief Fetches successor key from key/index. */
std::optional<ripple::uint256>
fetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield) const;
/*! @brief Virtual function version of fetchSuccessorKey. */
virtual std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context& yield)
const = 0;
/**
* @brief Fetches book offers.
*
* @param book Unsigned 256-bit integer.
* @param ledgerSequence Standard unsigned integer.
* @param limit Paging limit as to how many offers are returned per page.
* @param yield Currently executing coroutine.
* @return BookOffersPage
*/
BookOffersPage
fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
boost::asio::yield_context& yield) const;
/**
* @brief Synchronously fetches the ledger range from the database.
*
* The ledger range is a struct of minimum and maximum sequence numbers.
* The lambda captures surrounding variables by reference ([&]) and forwards
* the call to the yield-based overload inside a temporary coroutine via
* synchronous().
*
* @return std::optional<LedgerRange>
*/
std::optional<LedgerRange>
hardFetchLedgerRange() const
{
return synchronous([&](boost::asio::yield_context yield) { return hardFetchLedgerRange(yield); });
}
/*! @brief Virtual function equivalent of hardFetchLedgerRange. */
virtual std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context& yield) const = 0;
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow() const;
/*! @brief Fetches ledger range but doesn't throw timeout. Use with care. */
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const;
/**
* @brief Writes to a specific ledger.
*
* @param ledgerInfo Const reference to the ledger information.
* @param ledgerHeader r-value string representing ledger header.
*/
virtual void
writeLedger(ripple::LedgerInfo const& ledgerInfo, std::string&& ledgerHeader) = 0;
/**
* @brief Writes a new ledger object.
*
* The key and blob are passed as r-value references; their contents are
* moved into the backend.
*
* @param key String key represented as an r-value.
* @param seq Unsigned integer representing a sequence.
* @param blob r-value string holding the object data (blob).
*/
virtual void
writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob);
/**
* @brief Writes a new transaction.
*
* @param hash r-value reference. No memory address.
* @param seq Unsigned 32-bit integer.
* @param date Unsigned 32-bit integer.
* @param transaction r-value reference. No memory address.
* @param metadata r-value reference. No memory address.
*/
virtual void
writeTransaction(
std::string&& hash,
std::uint32_t const seq,
std::uint32_t const date,
std::string&& transaction,
std::string&& metadata) = 0;
/**
* @brief Write a new NFT.
*
* @param data Passed in as an r-value reference.
*/
virtual void
writeNFTs(std::vector<NFTsData>&& data) = 0;
/**
* @brief Write a new set of account transactions.
*
* @param data Passed in as an r-value reference.
*/
virtual void
writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;
/**
* @brief Write a new transaction for a specific NFT.
*
* @param data Passed in as an r-value reference.
*/
virtual void
writeNFTTransactions(std::vector<NFTTransactionsData>&& data) = 0;
/**
* @brief Write a new successor.
*
* @param key Passed in as an r-value reference.
* @param seq Unsigned 32-bit integer.
* @param successor Passed in as an r-value reference.
*/
virtual void
writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) = 0;
/*! @brief Tells database we will write data for a specific ledger. */
virtual void
startWrites() const = 0;
/**
* @brief Tells database we finished writing all data for a specific ledger.
*
* TODO: change the return value to represent different results:
* Committed, write conflict, errored, successful but not committed
*
* @param ledgerSequence Const unsigned 32-bit integer representing the ledger sequence.
* @return true
* @return false
*/
bool
finishWrites(std::uint32_t const ledgerSequence);
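/*
* Illustrative write cycle (a sketch of how an ETL-style caller might use these
* methods; the variables are hypothetical and this is not part of the original
* interface documentation):
*
*   backend->startWrites();
*   backend->writeLedger(ledgerInfo, std::move(header));
*   backend->writeLedgerObject(std::move(key), seq, std::move(blob));
*   bool const committed = backend->finishWrites(seq);
*/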
/**
* @brief Selectively deletes parts of the database.
*
* @param numLedgersToKeep Unsigned 32-bit integer representing the number of
* ledgers to keep.
* @param yield Currently executing coroutine.
* @return true
* @return false
*/
virtual bool
doOnlineDelete(std::uint32_t numLedgersToKeep, boost::asio::yield_context& yield) const = 0;
/**
* @brief Opens the database
*
* Open the database. Set up all of the necessary objects and
* data structures. After this call completes, the database is
* ready for use.
*
* @param readOnly Boolean indicating whether the database is opened read-only.
*/
virtual void
open(bool readOnly) = 0;
/*! @brief Closes the database, releasing any resources. */
virtual void
close(){};
virtual bool
isTooBusy() const = 0;
private:
/**
* @brief Private helper method to write ledger object
*
* @param key r-value string representing key.
* @param seq Unsigned 32-bit integer representing sequence.
* @param blob r-value vector of unsigned chars.
*/
virtual void
doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) = 0;
virtual bool
doFinishWrites() = 0;
};
} // namespace Backend
using BackendInterface = Backend::BackendInterface;

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,95 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <ripple/basics/base_uint.h>
#include <ripple/basics/hardened_hash.h>
#include <backend/Types.h>
#include <map>
#include <mutex>
#include <shared_mutex>
#include <utility>
#include <vector>
namespace Backend {
class SimpleCache
{
struct CacheEntry
{
uint32_t seq = 0;
Blob blob;
};
// counters for fetchLedgerObject(s) hit rate
mutable std::atomic_uint32_t objectReqCounter_ = 0;
mutable std::atomic_uint32_t objectHitCounter_ = 0;
// counters for fetchSuccessorKey hit rate
mutable std::atomic_uint32_t successorReqCounter_ = 0;
mutable std::atomic_uint32_t successorHitCounter_ = 0;
std::map<ripple::uint256, CacheEntry> map_;
mutable std::shared_mutex mtx_;
uint32_t latestSeq_ = 0;
std::atomic_bool full_ = false;
std::atomic_bool disabled_ = false;
// temporary set to prevent background thread from writing already deleted
// data. not used when cache is full
std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
public:
// Update the cache with new ledger objects
// set isBackground to true when writing old data from a background thread
void
update(std::vector<LedgerObject> const& blobs, uint32_t seq, bool isBackground = false);
std::optional<Blob>
get(ripple::uint256 const& key, uint32_t seq) const;
// always returns empty optional if isFull() is false
std::optional<LedgerObject>
getSuccessor(ripple::uint256 const& key, uint32_t seq) const;
// always returns empty optional if isFull() is false
std::optional<LedgerObject>
getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
void
setDisabled();
void
setFull();
uint32_t
latestLedgerSequence() const;
// whether the cache has all data for the most recent ledger
bool
isFull() const;
size_t
size() const;
float
getObjectHitRate() const;
float
getSuccessorHitRate() const;
};
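/*
* Illustrative usage (a sketch only; the variables are hypothetical):
*
*   SimpleCache cache;
*   cache.update(ledgerObjects, seq);          // populate with objects from ledger `seq`
*   cache.setFull();                           // mark the cache as holding the full ledger
*   auto blob = cache.get(key, seq);           // std::nullopt on a cache miss
*   auto succ = cache.getSuccessor(key, seq);  // empty unless isFull() is true
*/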
} // namespace Backend

View File

@@ -1,79 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <backend/cassandra/Types.h>
#include <boost/asio/spawn.hpp>
#include <chrono>
#include <concepts>
#include <optional>
#include <string>
namespace Backend::Cassandra {
// clang-format off
template <typename T>
concept SomeSettingsProvider = requires(T a) {
{ a.getSettings() } -> std::same_as<Settings>;
{ a.getKeyspace() } -> std::same_as<std::string>;
{ a.getTablePrefix() } -> std::same_as<std::optional<std::string>>;
{ a.getReplicationFactor() } -> std::same_as<uint16_t>;
{ a.getTtl() } -> std::same_as<uint16_t>;
};
// clang-format on
// clang-format off
template <typename T>
concept SomeExecutionStrategy = requires(
T a,
Settings settings,
Handle handle,
Statement statement,
std::vector<Statement> statements,
PreparedStatement prepared,
boost::asio::yield_context token
) {
{ T(settings, handle) };
{ a.sync() } -> std::same_as<void>;
{ a.isTooBusy() } -> std::same_as<bool>;
{ a.writeSync(statement) } -> std::same_as<ResultOrError>;
{ a.writeSync(prepared) } -> std::same_as<ResultOrError>;
{ a.write(prepared) } -> std::same_as<void>;
{ a.write(std::move(statements)) } -> std::same_as<void>;
{ a.read(token, prepared) } -> std::same_as<ResultOrError>;
{ a.read(token, statement) } -> std::same_as<ResultOrError>;
{ a.read(token, statements) } -> std::same_as<ResultOrError>;
{ a.readEach(token, statements) } -> std::same_as<std::vector<Result>>;
};
// clang-format on
// clang-format off
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
{ T(ioc) };
{ a.shouldRetry(err) } -> std::same_as<bool>;
{ a.retry([](){}) } -> std::same_as<void>;
{ a.calculateDelay(attempt) } -> std::same_as<std::chrono::milliseconds>;
};
// clang-format on
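/*
* Illustrative sketch (not part of the original header) of a minimal type that
* would satisfy SomeRetryPolicy; the bodies are placeholders:
*
*   struct NoRetryPolicy {
*       NoRetryPolicy(boost::asio::io_context&) {}
*       bool shouldRetry(CassandraError) const { return false; }
*       void retry(auto&& fn) {}
*       std::chrono::milliseconds calculateDelay(uint32_t) const { return std::chrono::milliseconds{0}; }
*   };
*/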
} // namespace Backend::Cassandra

View File

@@ -1,99 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <backend/cassandra/impl/ManagedObject.h>
#include <log/Logger.h>
#include <cassandra.h>
#include <chrono>
#include <optional>
#include <string>
#include <string_view>
#include <thread>
#include <variant>
namespace Backend::Cassandra::detail {
struct Settings
{
struct ContactPoints
{
std::string contactPoints = "127.0.0.1"; // defaults to localhost
std::optional<uint16_t> port;
};
struct SecureConnectionBundle
{
std::string bundle; // no meaningful default
};
bool enableLog = false;
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{10000};
std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all
std::variant<ContactPoints, SecureConnectionBundle> connectionInfo = ContactPoints{};
uint32_t threads = std::thread::hardware_concurrency();
uint32_t maxWriteRequestsOutstanding = 10'000;
uint32_t maxReadRequestsOutstanding = 100'000;
std::optional<std::string> certificate; // ssl context
std::optional<std::string> username;
std::optional<std::string> password;
Settings
withContactPoints(std::string_view contactPoints)
{
auto tmp = *this;
tmp.connectionInfo = ContactPoints{std::string{contactPoints}};
return tmp;
}
static Settings
defaultSettings()
{
return Settings();
}
};
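// Example (sketch): Settings::defaultSettings().withContactPoints("10.0.0.1") returns a
// copy of the default settings pointed at the given contact points.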
class Cluster : public ManagedObject<CassCluster>
{
clio::Logger log_{"Backend"};
public:
Cluster(Settings const& settings);
private:
void
setupConnection(Settings const& settings);
void
setupContactPoints(Settings::ContactPoints const& points);
void
setupSecureBundle(Settings::SecureConnectionBundle const& bundle);
void
setupCertificate(Settings const& settings);
void
setupCredentials(Settings const& settings);
};
} // namespace Backend::Cassandra::detail

View File

@@ -1,443 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/AsyncExecutor.h>
#include <log/Logger.h>
#include <util/Expected.h>
#include <boost/asio/async_result.hpp>
#include <boost/asio/spawn.hpp>
#include <atomic>
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <thread>
namespace Backend::Cassandra::detail {
/**
* @brief Implements async and sync querying against the cassandra DB with
* support for throttling.
*
* Note: A lot of the code that uses yield is repeated below. This is ok for now
* because we are hopefully going to be getting rid of it entirely later on.
*/
template <typename HandleType = Handle>
class DefaultExecutionStrategy
{
clio::Logger log_{"Backend"};
std::uint32_t maxWriteRequestsOutstanding_;
std::atomic_uint32_t numWriteRequestsOutstanding_ = 0;
std::uint32_t maxReadRequestsOutstanding_;
std::atomic_uint32_t numReadRequestsOutstanding_ = 0;
std::mutex throttleMutex_;
std::condition_variable throttleCv_;
std::mutex syncMutex_;
std::condition_variable syncCv_;
boost::asio::io_context ioc_;
std::optional<boost::asio::io_service::work> work_;
std::reference_wrapper<HandleType const> handle_;
std::thread thread_;
public:
using ResultOrErrorType = typename HandleType::ResultOrErrorType;
using StatementType = typename HandleType::StatementType;
using PreparedStatementType = typename HandleType::PreparedStatementType;
using FutureType = typename HandleType::FutureType;
using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
using ResultType = typename HandleType::ResultType;
using CompletionTokenType = boost::asio::yield_context;
using FunctionType = void(boost::system::error_code);
using AsyncResultType = boost::asio::async_result<CompletionTokenType, FunctionType>;
using HandlerType = typename AsyncResultType::completion_handler_type;
DefaultExecutionStrategy(Settings settings, HandleType const& handle)
: maxWriteRequestsOutstanding_{settings.maxWriteRequestsOutstanding}
, maxReadRequestsOutstanding_{settings.maxReadRequestsOutstanding}
, work_{ioc_}
, handle_{std::cref(handle)}
, thread_{[this]() { ioc_.run(); }}
{
log_.info() << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
<< "; Max read requests outstanding is " << maxReadRequestsOutstanding_;
}
~DefaultExecutionStrategy()
{
work_.reset();
ioc_.stop();
thread_.join();
}
/**
* @brief Wait for all async writes to finish before unblocking
*/
void
sync()
{
log_.debug() << "Waiting to sync all writes...";
std::unique_lock<std::mutex> lck(syncMutex_);
syncCv_.wait(lck, [this]() { return finishedAllWriteRequests(); });
log_.debug() << "Sync done.";
}
bool
isTooBusy() const
{
return numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
}
/**
* @brief Blocking query execution used for writing data
*
* Retries forever sleeping for 5 milliseconds between attempts.
*/
ResultOrErrorType
writeSync(StatementType const& statement)
{
while (true)
{
if (auto res = handle_.get().execute(statement); res)
{
return res;
}
else
{
log_.warn() << "Cassandra sync write error, retrying: " << res.error();
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
}
}
/**
* @brief Blocking query execution used for writing data
*
* Retries forever sleeping for 5 milliseconds between attempts.
*/
template <typename... Args>
ResultOrErrorType
writeSync(PreparedStatementType const& preparedStatement, Args&&... args)
{
return writeSync(preparedStatement.bind(std::forward<Args>(args)...));
}
/**
* @brief Non-blocking query execution used for writing data
*
* Retries forever with retry policy specified by @ref AsyncExecutor
*
* @param preparedStatement Statement to prepare and execute
* @param args Args to bind to the prepared statement
* @throw DatabaseTimeout on timeout
*/
template <typename... Args>
void
write(PreparedStatementType const& preparedStatement, Args&&... args)
{
auto statement = preparedStatement.bind(std::forward<Args>(args)...);
incrementOutstandingRequestCount();
// Note: lifetime is controlled by std::shared_from_this internally
AsyncExecutor<std::decay_t<decltype(statement)>, HandleType>::run(
ioc_, handle_, std::move(statement), [this](auto const&) { decrementOutstandingRequestCount(); });
}
/**
* @brief Non-blocking batched query execution used for writing data
*
* Retries forever with retry policy specified by @ref AsyncExecutor.
*
* @param statements Vector of statements to execute as a batch
* @throw DatabaseTimeout on timeout
*/
void
write(std::vector<StatementType>&& statements)
{
if (statements.empty())
return;
incrementOutstandingRequestCount();
// Note: lifetime is controlled by std::shared_from_this internally
AsyncExecutor<std::decay_t<decltype(statements)>, HandleType>::run(
ioc_, handle_, std::move(statements), [this](auto const&) { decrementOutstandingRequestCount(); });
}
/**
* @brief Coroutine-based query execution used for reading data.
*
* Retries forever until successful or throws an exception on timeout.
*
* @param token Completion token (yield_context)
* @param preparedStatement Statement to prepare and execute
* @param args Args to bind to the prepared statement
* @throw DatabaseTimeout on timeout
* @return ResultType or error wrapped in Expected
*/
template <typename... Args>
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, PreparedStatementType const& preparedStatement, Args&&... args)
{
return read(token, preparedStatement.bind(std::forward<Args>(args)...));
}
/**
* @brief Coroutine-based query execution used for reading data.
*
* Retries forever until successful or throws an exception on timeout.
*
* @param token Completion token (yield_context)
* @param statements Statements to execute in a batch
* @throw DatabaseTimeout on timeout
* @return ResultType or error wrapped in Expected
*/
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, std::vector<StatementType> const& statements)
{
auto handler = HandlerType{token};
auto result = AsyncResultType{handler};
auto const numStatements = statements.size();
// todo: perhaps use policy instead
while (true)
{
numReadRequestsOutstanding_ += numStatements;
auto const future = handle_.get().asyncExecute(statements, [handler](auto&&) mutable {
boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
handler(boost::system::error_code{});
});
});
// suspend coroutine until completion handler is called
result.get();
numReadRequestsOutstanding_ -= numStatements;
// it's safe to call blocking get on future here as we already
// waited for the coroutine to resume above.
if (auto res = future.get(); res)
{
return res;
}
else
{
log_.error() << "Failed batch read in coroutine: " << res.error();
throwErrorIfNeeded(res.error());
}
}
}
/**
* @brief Coroutine-based query execution used for reading data.
*
* Retries forever until successful or throws an exception on timeout.
*
* @param token Completion token (yield_context)
* @param statement Statement to execute
* @throw DatabaseTimeout on timeout
* @return ResultType or error wrapped in Expected
*/
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, StatementType const& statement)
{
auto handler = HandlerType{token};
auto result = AsyncResultType{handler};
// todo: perhaps use policy instead
while (true)
{
++numReadRequestsOutstanding_;
auto const future = handle_.get().asyncExecute(statement, [handler](auto const&) mutable {
boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
handler(boost::system::error_code{});
});
});
// suspend coroutine until completion handler is called
result.get();
--numReadRequestsOutstanding_;
// it's safe to call blocking get on future here as we already
// waited for the coroutine to resume above.
if (auto res = future.get(); res)
{
return res;
}
else
{
log_.error() << "Failed read in coroutine: " << res.error();
throwErrorIfNeeded(res.error());
}
}
}
/**
* @brief Coroutine-based query execution used for reading data.
*
* Attempts to execute each statement. On any error the whole vector will be
* discarded and an exception will be thrown.
*
* @param token Completion token (yield_context)
* @param statements Statements to execute
* @throw DatabaseTimeout on db error
* @return Vector of results
*/
std::vector<ResultType>
readEach(CompletionTokenType token, std::vector<StatementType> const& statements)
{
auto handler = HandlerType{token};
auto result = AsyncResultType{handler};
std::atomic_bool hadError = false;
std::atomic_int numOutstanding = statements.size();
numReadRequestsOutstanding_ += statements.size();
auto futures = std::vector<FutureWithCallbackType>{};
futures.reserve(numOutstanding);
// used as the handler for each async statement individually
auto executionHandler = [handler, &hadError, &numOutstanding](auto const& res) mutable {
if (not res)
hadError = true;
// when all async operations complete unblock the result
if (--numOutstanding == 0)
boost::asio::post(boost::asio::get_associated_executor(handler), [handler]() mutable {
handler(boost::system::error_code{});
});
};
std::transform(
std::cbegin(statements),
std::cend(statements),
std::back_inserter(futures),
[this, &executionHandler](auto const& statement) {
return handle_.get().asyncExecute(statement, executionHandler);
});
// suspend coroutine until completion handler is called
result.get();
numReadRequestsOutstanding_ -= statements.size();
if (hadError)
throw DatabaseTimeout{};
std::vector<ResultType> results;
results.reserve(futures.size());
// it's safe to call blocking get on futures here as we already
// waited for the coroutine to resume above.
std::transform(
std::make_move_iterator(std::begin(futures)),
std::make_move_iterator(std::end(futures)),
std::back_inserter(results),
[](auto&& future) {
auto entry = future.get();
auto&& res = entry.value();
return std::move(res);
});
assert(futures.size() == statements.size());
assert(results.size() == statements.size());
return results;
}
private:
void
incrementOutstandingRequestCount()
{
{
std::unique_lock<std::mutex> lck(throttleMutex_);
if (!canAddWriteRequest())
{
log_.trace() << "Max outstanding requests reached. "
<< "Waiting for other requests to finish";
throttleCv_.wait(lck, [this]() { return canAddWriteRequest(); });
}
}
++numWriteRequestsOutstanding_;
}
void
decrementOutstandingRequestCount()
{
// sanity check
if (numWriteRequestsOutstanding_ == 0)
{
assert(false);
throw std::runtime_error("decrementing num outstanding below 0");
}
size_t cur = (--numWriteRequestsOutstanding_);
{
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard lck(throttleMutex_);
throttleCv_.notify_one();
}
if (cur == 0)
{
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard lck(syncMutex_);
syncCv_.notify_one();
}
}
bool
canAddWriteRequest() const
{
return numWriteRequestsOutstanding_ < maxWriteRequestsOutstanding_;
}
bool
finishedAllWriteRequests() const
{
return numWriteRequestsOutstanding_ == 0;
}
void
throwErrorIfNeeded(CassandraError err) const
{
if (err.isTimeout())
throw DatabaseTimeout();
if (err.isInvalidQuery())
throw std::runtime_error("Invalid query");
}
};
} // namespace Backend::Cassandra::detail

View File

@@ -0,0 +1,240 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/BackendCounters.hpp"
#include "util/Assert.hpp"
#include "util/prometheus/Label.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <boost/json/object.hpp>
#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
namespace data {
namespace {
std::vector<std::int64_t> const histogramBuckets{1, 2, 5, 10, 20, 50, 100, 200, 500, 700, 1000};
std::int64_t
durationInMillisecondsSince(std::chrono::steady_clock::time_point const startTime)
{
return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - startTime).count();
}
} // namespace
using namespace util::prometheus;
BackendCounters::BackendCounters()
: tooBusyCounter_(PrometheusService::counterInt(
"backend_too_busy_total_number",
Labels(),
"The total number of times the backend was too busy to process a request"
))
, writeSyncCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({Label{"operation", "write_sync"}}),
"The total number of times the backend had to write synchronously"
))
, writeSyncRetryCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({Label{"operation", "write_sync_retry"}}),
"The total number of times the backend had to retry a synchronous write"
))
, asyncWriteCounters_{"write_async"}
, asyncReadCounters_{"read_async"}
, readDurationHistogram_(PrometheusService::histogramInt(
"backend_duration_milliseconds_histogram",
Labels({Label{"operation", "read"}}),
histogramBuckets,
"The duration of backend read operations including retries"
))
, writeDurationHistogram_(PrometheusService::histogramInt(
"backend_duration_milliseconds_histogram",
Labels({Label{"operation", "write"}}),
histogramBuckets,
"The duration of backend write operations including retries"
))
{
}
BackendCounters::PtrType
BackendCounters::make()
{
struct EnableMakeShared : public BackendCounters {};
return std::make_shared<EnableMakeShared>();
}
void
BackendCounters::registerTooBusy()
{
++tooBusyCounter_.get();
}
void
BackendCounters::registerWriteSync(std::chrono::steady_clock::time_point const startTime)
{
++writeSyncCounter_.get();
writeDurationHistogram_.get().observe(durationInMillisecondsSince(startTime));
}
void
BackendCounters::registerWriteSyncRetry()
{
++writeSyncRetryCounter_.get();
}
void
BackendCounters::registerWriteStarted()
{
asyncWriteCounters_.registerStarted(1u);
}
void
BackendCounters::registerWriteFinished(std::chrono::steady_clock::time_point const startTime)
{
asyncWriteCounters_.registerFinished(1u);
auto const duration = durationInMillisecondsSince(startTime);
writeDurationHistogram_.get().observe(duration);
}
void
BackendCounters::registerWriteRetry()
{
asyncWriteCounters_.registerRetry(1u);
}
void
BackendCounters::registerReadStarted(std::uint64_t const count)
{
asyncReadCounters_.registerStarted(count);
}
void
BackendCounters::registerReadFinished(std::chrono::steady_clock::time_point const startTime, std::uint64_t const count)
{
asyncReadCounters_.registerFinished(count);
auto const duration = durationInMillisecondsSince(startTime);
for (std::uint64_t i = 0; i < count; ++i)
readDurationHistogram_.get().observe(duration);
}
void
BackendCounters::registerReadRetry(std::uint64_t const count)
{
asyncReadCounters_.registerRetry(count);
}
void
BackendCounters::registerReadError(std::uint64_t const count)
{
asyncReadCounters_.registerError(count);
}
boost::json::object
BackendCounters::report() const
{
boost::json::object result;
result["too_busy"] = tooBusyCounter_.get().value();
result["write_sync"] = writeSyncCounter_.get().value();
result["write_sync_retry"] = writeSyncRetryCounter_.get().value();
for (auto const& [key, value] : asyncWriteCounters_.report())
result[key] = value;
for (auto const& [key, value] : asyncReadCounters_.report())
result[key] = value;
return result;
}
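// Illustrative shape of the object returned by report(); the numbers are made up:
//   {
//     "too_busy": 0, "write_sync": 12, "write_sync_retry": 1,
//     "write_async_pending": 3, "write_async_completed": 1024,
//     "write_async_retry": 2, "write_async_error": 0,
//     "read_async_pending": 5, "read_async_completed": 4096,
//     "read_async_retry": 0, "read_async_error": 1
//   }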
BackendCounters::AsyncOperationCounters::AsyncOperationCounters(std::string name)
: name_(std::move(name))
, pendingCounter_(PrometheusService::gaugeInt(
"backend_operations_current_number",
Labels({{"operation", name_}, {"status", "pending"}}),
"The current number of pending " + name_ + " operations"
))
, completedCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({{"operation", name_}, {"status", "completed"}}),
"The total number of completed " + name_ + " operations"
))
, retryCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({{"operation", name_}, {"status", "retry"}}),
"The total number of retried " + name_ + " operations"
))
, errorCounter_(PrometheusService::counterInt(
"backend_operations_total_number",
Labels({{"operation", name_}, {"status", "error"}}),
"The total number of errored " + name_ + " operations"
))
{
}
void
BackendCounters::AsyncOperationCounters::registerStarted(std::uint64_t const count)
{
pendingCounter_.get() += count;
}
void
BackendCounters::AsyncOperationCounters::registerFinished(std::uint64_t const count)
{
ASSERT(
pendingCounter_.get().value() >= static_cast<std::int64_t>(count),
"Finished operations can't be more than pending"
);
pendingCounter_.get() -= count;
completedCounter_.get() += count;
}
void
BackendCounters::AsyncOperationCounters::registerRetry(std::uint64_t count)
{
retryCounter_.get() += count;
}
void
BackendCounters::AsyncOperationCounters::registerError(std::uint64_t count)
{
ASSERT(
pendingCounter_.get().value() >= static_cast<std::int64_t>(count), "Error operations can't be more than pending"
);
pendingCounter_.get() -= count;
errorCounter_.get() += count;
}
boost::json::object
BackendCounters::AsyncOperationCounters::report() const
{
return boost::json::object{
{name_ + "_pending", pendingCounter_.get().value()},
{name_ + "_completed", completedCounter_.get().value()},
{name_ + "_retry", retryCounter_.get().value()},
{name_ + "_error", errorCounter_.get().value()}
};
}
} // namespace data

View File

@@ -0,0 +1,164 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "util/prometheus/Counter.hpp"
#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Histogram.hpp"
#include <boost/json/object.hpp>
#include <chrono>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
namespace data {
/**
* @brief A concept for a class that can be used to count backend operations.
*/
template <typename T>
concept SomeBackendCounters = requires(T a) {
typename T::PtrType;
{
a.registerTooBusy()
} -> std::same_as<void>;
{
a.registerWriteSync(std::chrono::steady_clock::time_point{})
} -> std::same_as<void>;
{
a.registerWriteSyncRetry()
} -> std::same_as<void>;
{
a.registerWriteStarted()
} -> std::same_as<void>;
{
a.registerWriteFinished(std::chrono::steady_clock::time_point{})
} -> std::same_as<void>;
{
a.registerWriteRetry()
} -> std::same_as<void>;
{
a.registerReadStarted(std::uint64_t{})
} -> std::same_as<void>;
{
a.registerReadFinished(std::chrono::steady_clock::time_point{}, std::uint64_t{})
} -> std::same_as<void>;
{
a.registerReadRetry(std::uint64_t{})
} -> std::same_as<void>;
{
a.registerReadError(std::uint64_t{})
} -> std::same_as<void>;
{
a.report()
} -> std::same_as<boost::json::object>;
};
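// A compile-time check against the concept could look like this (sketch, not part
// of the original header):
//
//   static_assert(SomeBackendCounters<BackendCounters>);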
/**
* @brief Holds statistics about the backend.
*
* @note This class is thread-safe.
*/
class BackendCounters {
public:
using PtrType = std::shared_ptr<BackendCounters>;
static PtrType
make();
void
registerTooBusy();
void
registerWriteSync(std::chrono::steady_clock::time_point startTime);
void
registerWriteSyncRetry();
void
registerWriteStarted();
void
registerWriteFinished(std::chrono::steady_clock::time_point startTime);
void
registerWriteRetry();
void
registerReadStarted(std::uint64_t count = 1u);
void
registerReadFinished(std::chrono::steady_clock::time_point startTime, std::uint64_t count = 1u);
void
registerReadRetry(std::uint64_t count = 1u);
void
registerReadError(std::uint64_t count = 1u);
boost::json::object
report() const;
private:
BackendCounters();
class AsyncOperationCounters {
public:
AsyncOperationCounters(std::string name);
void
registerStarted(std::uint64_t count);
void
registerFinished(std::uint64_t count);
void
registerRetry(std::uint64_t count);
void
registerError(std::uint64_t count);
boost::json::object
report() const;
private:
std::string name_;
std::reference_wrapper<util::prometheus::GaugeInt> pendingCounter_;
std::reference_wrapper<util::prometheus::CounterInt> completedCounter_;
std::reference_wrapper<util::prometheus::CounterInt> retryCounter_;
std::reference_wrapper<util::prometheus::CounterInt> errorCounter_;
};
std::reference_wrapper<util::prometheus::CounterInt> tooBusyCounter_;
std::reference_wrapper<util::prometheus::CounterInt> writeSyncCounter_;
std::reference_wrapper<util::prometheus::CounterInt> writeSyncRetryCounter_;
AsyncOperationCounters asyncWriteCounters_{"write_async"};
AsyncOperationCounters asyncReadCounters_{"read_async"};
std::reference_wrapper<util::prometheus::HistogramInt> readDurationHistogram_;
std::reference_wrapper<util::prometheus::HistogramInt> writeDurationHistogram_;
};
} // namespace data

View File

@@ -19,52 +19,52 @@
#pragma once
#include <backend/BackendInterface.h>
#include <backend/CassandraBackend.h>
#include <backend/CassandraBackendNew.h>
#include <config/Config.h>
#include <log/Logger.h>
#include "data/BackendInterface.hpp"
#include "data/CassandraBackend.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/predicate.hpp>
namespace Backend {
std::shared_ptr<BackendInterface>
make_Backend(boost::asio::io_context& ioc, clio::Config const& config)
#include <memory>
#include <stdexcept>
#include <string>
namespace data {
/**
* @brief A factory function that creates the backend based on a config.
*
* @param config The clio config to use
* @return A shared_ptr<BackendInterface> with the selected implementation
*/
inline std::shared_ptr<BackendInterface>
make_Backend(util::Config const& config)
{
static clio::Logger log{"Backend"};
log.info() << "Constructing BackendInterface";
static util::Logger const log{"Backend"};
LOG(log.info()) << "Constructing BackendInterface";
auto readOnly = config.valueOr("read_only", false);
auto type = config.value<std::string>("database.type");
auto const readOnly = config.valueOr("read_only", false);
auto const type = config.value<std::string>("database.type");
std::shared_ptr<BackendInterface> backend = nullptr;
if (boost::iequals(type, "cassandra"))
{
// TODO: retire `cassandra-new` by next release after 2.0
if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new")) {
auto cfg = config.section("database." + type);
auto ttl = config.valueOr<uint32_t>("online_delete", 0) * 4;
backend = std::make_shared<CassandraBackend>(ioc, cfg, ttl);
}
else if (boost::iequals(type, "cassandra-new"))
{
auto cfg = config.section("database." + type);
auto ttl = config.valueOr<uint16_t>("online_delete", 0) * 4;
backend =
std::make_shared<Backend::Cassandra::CassandraBackend>(Backend::Cassandra::SettingsProvider{cfg, ttl});
backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
}
if (!backend)
throw std::runtime_error("Invalid database type");
backend->open(readOnly);
auto rng = backend->hardFetchLedgerRangeNoThrow();
auto const rng = backend->hardFetchLedgerRangeNoThrow();
if (rng)
{
backend->updateRange(rng->minSequence);
backend->updateRange(rng->maxSequence);
}
log.info() << "Constructed BackendInterface Successfully";
backend->setRange(rng->minSequence, rng->maxSequence);
LOG(log.info()) << "Constructed BackendInterface Successfully";
return backend;
}
} // namespace Backend
} // namespace data
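/*
* Illustrative usage (a sketch; `config` stands for a util::Config loaded elsewhere):
*
*   auto backend = data::make_Backend(config);
*   auto const range = backend->fetchLedgerRange();
*/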

View File

@@ -0,0 +1,387 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include "data/BackendInterface.hpp"
#include "data/Types.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/spawn.hpp>
#include <ripple/basics/base_uint.h>
#include <ripple/basics/strHex.h>
#include <ripple/protocol/Fees.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/Serializer.h>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
// local to compilation unit loggers
namespace {
util::Logger gLog{"Backend"};
} // namespace
namespace data {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence;
auto commitRes = doFinishWrites();
if (commitRes) {
LOG(gLog.debug()) << "Successfully commited. Updating range now to " << ledgerSequence;
updateRange(ledgerSequence);
}
return commitRes;
}
void
BackendInterface::writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob)
{
ASSERT(key.size() == sizeof(ripple::uint256), "Key must be 256 bits");
doWriteLedgerObject(std::move(key), seq, std::move(blob));
}
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}
// *** state data methods
std::optional<Blob>
BackendInterface::fetchLedgerObject(
ripple::uint256 const& key,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const
{
auto obj = cache_.get(key, sequence);
if (obj) {
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
return *obj;
}
LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
auto dbObj = doFetchLedgerObject(key, sequence, yield);
if (!dbObj) {
LOG(gLog.trace()) << "Missed cache and missed in db";
} else {
LOG(gLog.trace()) << "Missed cache but found in db";
}
return dbObj;
}
std::vector<Blob>
BackendInterface::fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t const sequence,
boost::asio::yield_context yield
) const
{
std::vector<Blob> results;
results.resize(keys.size());
std::vector<ripple::uint256> misses;
for (size_t i = 0; i < keys.size(); ++i) {
auto obj = cache_.get(keys[i], sequence);
if (obj) {
results[i] = *obj;
} else {
misses.push_back(keys[i]);
}
}
LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();
if (!misses.empty()) {
auto objs = doFetchLedgerObjects(misses, sequence, yield);
for (size_t i = 0, j = 0; i < results.size(); ++i) {
if (results[i].empty()) {
results[i] = objs[j];
++j;
}
}
}
return results;
}
// Fetches the successor to key/index
std::optional<ripple::uint256>
BackendInterface::fetchSuccessorKey(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const
{
auto succ = cache_.getSuccessor(key, ledgerSequence);
if (succ) {
LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
} else {
LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
}
return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}
std::optional<LedgerObject>
BackendInterface::fetchSuccessorObject(
ripple::uint256 key,
std::uint32_t const ledgerSequence,
boost::asio::yield_context yield
) const
{
auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
if (succ) {
auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
if (!obj)
return {{*succ, {}}};
return {{*succ, *obj}};
}
return {};
}
BookOffersPage
BackendInterface::fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
boost::asio::yield_context yield
) const
{
// TODO try to speed this up. This can take a few seconds. The goal is
// to get it down to a few hundred milliseconds.
BookOffersPage page;
ripple::uint256 const bookEnd = ripple::getQualityNext(book);
ripple::uint256 uTipIndex = book;
std::vector<ripple::uint256> keys;
auto getMillis = [](auto diff) { return std::chrono::duration_cast<std::chrono::milliseconds>(diff).count(); };
auto begin = std::chrono::system_clock::now();
std::uint32_t numSucc = 0;
std::uint32_t numPages = 0;
long succMillis = 0;
long pageMillis = 0;
while (keys.size() < limit) {
auto mid1 = std::chrono::system_clock::now();
auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield);
auto mid2 = std::chrono::system_clock::now();
numSucc++;
succMillis += getMillis(mid2 - mid1);
if (!offerDir || offerDir->key >= bookEnd) {
LOG(gLog.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
break;
}
uTipIndex = offerDir->key;
while (keys.size() < limit) {
++numPages;
ripple::STLedgerEntry const sle{
ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key
};
auto indexes = sle.getFieldV256(ripple::sfIndexes);
keys.insert(keys.end(), indexes.begin(), indexes.end());
auto next = sle.getFieldU64(ripple::sfIndexNext);
if (next == 0u) {
LOG(gLog.trace()) << "Next is empty. breaking";
break;
}
auto nextKey = ripple::keylet::page(uTipIndex, next);
auto nextDir = fetchLedgerObject(nextKey.key, ledgerSequence, yield);
ASSERT(nextDir.has_value(), "Next dir must exist");
offerDir->blob = *nextDir;
offerDir->key = nextKey.key;
}
auto mid3 = std::chrono::system_clock::now();
pageMillis += getMillis(mid3 - mid2);
}
auto mid = std::chrono::system_clock::now();
auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < keys.size() && i < limit; ++i) {
LOG(gLog.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
<< " ledgerSequence = " << ledgerSequence;
ASSERT(!objs[i].empty(), "Ledger object can't be empty");
page.offers.push_back({keys[i], objs[i]});
}
auto end = std::chrono::system_clock::now();
LOG(gLog.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took "
<< std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
<< std::to_string(succMillis) << " milliseconds. Fetched next dir " << std::to_string(numSucc)
<< " times"
<< " Fetching next page of dir took " << std::to_string(pageMillis) << " milliseconds"
<< ". num pages = " << std::to_string(numPages) << ". Fetching all objects took "
<< std::to_string(getMillis(end - mid))
<< " milliseconds. total time = " << std::to_string(getMillis(end - begin)) << " milliseconds"
<< " book = " << ripple::strHex(book);
return page;
}
std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRange() const
{
return synchronous([this](auto yield) { return hardFetchLedgerRange(yield); });
}
std::optional<LedgerRange>
BackendInterface::fetchLedgerRange() const
{
std::shared_lock const lck(rngMtx_);
return range;
}
void
BackendInterface::updateRange(uint32_t newMax)
{
std::scoped_lock const lck(rngMtx_);
ASSERT(
!range || newMax >= range->maxSequence,
"Range shouldn't exist yet or newMax should be greater. newMax = {}, range->maxSequence = {}",
newMax,
range->maxSequence
);
if (!range) {
range = {newMax, newMax};
} else {
range->maxSequence = newMax;
}
}
void
BackendInterface::setRange(uint32_t min, uint32_t max, bool force)
{
std::scoped_lock const lck(rngMtx_);
if (!force) {
ASSERT(min <= max, "Range min must be less than or equal to max");
ASSERT(not range.has_value(), "Range was already set");
}
range = {min, max};
}
LedgerPage
BackendInterface::fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t const ledgerSequence,
std::uint32_t const limit,
bool outOfOrder,
boost::asio::yield_context yield
) const
{
LedgerPage page;
std::vector<ripple::uint256> keys;
bool reachedEnd = false;
while (keys.size() < limit && !reachedEnd) {
ripple::uint256 const& curCursor = !keys.empty() ? keys.back() : (cursor ? *cursor : firstKey);
std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
auto succ = fetchSuccessorKey(curCursor, seq, yield);
if (!succ) {
reachedEnd = true;
} else {
keys.push_back(*succ);
}
}
auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
for (size_t i = 0; i < objects.size(); ++i) {
if (!objects[i].empty()) {
page.objects.push_back({keys[i], std::move(objects[i])});
} else if (!outOfOrder) {
LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
<< " - seq = " << ledgerSequence;
std::stringstream msg;
for (size_t j = 0; j < objects.size(); ++j) {
msg << " - " << ripple::strHex(keys[j]);
}
LOG(gLog.error()) << msg.str();
}
}
if (!keys.empty() && !reachedEnd)
page.cursor = keys.back();
return page;
}
std::optional<ripple::Fees>
BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context yield) const
{
ripple::Fees fees;
auto key = ripple::keylet::fees().key;
auto bytes = fetchLedgerObject(key, seq, yield);
if (!bytes) {
LOG(gLog.error()) << "Could not find fees";
return {};
}
ripple::SerialIter it(bytes->data(), bytes->size());
ripple::SLE const sle{it, key};
// The XRPFees amendment introduced new fields for fee calculations.
// The new fields are set and the old fields are removed via a `set_fees` pseudo-transaction.
// Fall back to the old fields if `set_fees` has not yet updated the fields in this ledger.
auto hasNewFields = false;
{
auto const baseFeeXRP = sle.at(~ripple::sfBaseFeeDrops);
auto const reserveBaseXRP = sle.at(~ripple::sfReserveBaseDrops);
auto const reserveIncrementXRP = sle.at(~ripple::sfReserveIncrementDrops);
if (baseFeeXRP)
fees.base = baseFeeXRP->xrp();
if (reserveBaseXRP)
fees.reserve = reserveBaseXRP->xrp();
if (reserveIncrementXRP)
fees.increment = reserveIncrementXRP->xrp();
hasNewFields = baseFeeXRP || reserveBaseXRP || reserveIncrementXRP;
}
if (not hasNewFields) {
// Fallback to old fields
auto const baseFee = sle.at(~ripple::sfBaseFee);
auto const reserveBase = sle.at(~ripple::sfReserveBase);
auto const reserveIncrement = sle.at(~ripple::sfReserveIncrement);
if (baseFee)
fees.base = baseFee.value();
if (reserveBase)
fees.reserve = reserveBase.value();
if (reserveIncrement)
fees.increment = reserveIncrement.value();
}
return fees;
}
} // namespace data

View File

@@ -0,0 +1,615 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/DBHelpers.hpp"
#include "data/LedgerCache.hpp"
#include "data/Types.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <boost/json/object.hpp>
#include <boost/utility/result_of.hpp>
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/Fees.h>
#include <ripple/protocol/LedgerHeader.h>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <optional>
#include <shared_mutex>
#include <string>
#include <thread>
#include <type_traits>
#include <vector>
namespace data {
/**
* @brief Represents a database timeout error.
*/
class DatabaseTimeout : public std::exception {
public:
char const*
what() const throw() override
{
return "Database read timed out. Please retry the request";
}
};
static constexpr std::size_t DEFAULT_WAIT_BETWEEN_RETRY = 500;
/**
* @brief A helper function that catches DatabaseTimeout exceptions and retries indefinitely.
*
* @tparam FnType The type of function object to execute
* @param func The function object to execute
* @param waitMs Delay between retry attempts in milliseconds
* @return auto The same as the return type of func
*/
template <class FnType>
auto
retryOnTimeout(FnType func, size_t waitMs = DEFAULT_WAIT_BETWEEN_RETRY)
{
static util::Logger const log{"Backend"};
while (true) {
try {
return func();
} catch (DatabaseTimeout const&) {
LOG(log.error()) << "Database request timed out. Sleeping and retrying ... ";
std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
}
}
}
/**
* @brief Synchronously executes the given function object inside a coroutine.
*
* @tparam FnType The type of function object to execute
* @param func The function object to execute
* @return auto The same as the return type of func
*/
template <class FnType>
auto
synchronous(FnType&& func)
{
boost::asio::io_context ctx;
using R = typename boost::result_of<FnType(boost::asio::yield_context)>::type;
if constexpr (!std::is_same_v<R, void>) {
R res;
boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) {
res = func(yield);
});
ctx.run();
return res;
} else {
boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
ctx.run();
}
}
/**
* @brief Synchronously execute the given function object and retry until no DatabaseTimeout is thrown.
*
* @tparam FnType The type of function object to execute
* @param func The function object to execute
* @return auto The same as the return type of func
*/
template <class FnType>
auto
synchronousAndRetryOnTimeout(FnType&& func)
{
return retryOnTimeout([&]() { return synchronous(func); });
}
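As a usage sketch (not taken from the codebase; `backend` and `seq` are assumed to be available), these helpers let non-coroutine code issue one-off reads, with or without retrying on timeouts:
auto const range = data::synchronous([&](boost::asio::yield_context yield) {
    return backend->hardFetchLedgerRange(yield);
});

auto const fees = data::synchronousAndRetryOnTimeout([&](boost::asio::yield_context yield) {
    return backend->fetchFees(seq, yield);  // retried until no DatabaseTimeout is thrown
});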
/**
* @brief The interface to the database used by Clio.
*/
class BackendInterface {
protected:
mutable std::shared_mutex rngMtx_;
std::optional<LedgerRange> range;
LedgerCache cache_;
public:
BackendInterface() = default;
virtual ~BackendInterface() = default;
// TODO: Remove this hack. Cache should not be exposed through BackendInterface
/**
* @return Immutable cache
*/
LedgerCache const&
cache() const
{
return cache_;
}
/**
* @return Mutable cache
*/
LedgerCache&
cache()
{
return cache_;
}
/**
* @brief Fetches a specific ledger by sequence number.
*
* @param sequence The sequence number to fetch for
* @param yield The coroutine context
* @return The ripple::LedgerHeader if found; nullopt otherwise
*/
virtual std::optional<ripple::LedgerHeader>
fetchLedgerBySequence(std::uint32_t sequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches a specific ledger by hash.
*
* @param hash The hash to fetch for
* @param yield The coroutine context
* @return The ripple::LedgerHeader if found; nullopt otherwise
*/
virtual std::optional<ripple::LedgerHeader>
fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches the latest ledger sequence.
*
* @param yield The coroutine context
* @return Latest sequence wrapped in an optional if found; nullopt otherwise
*/
virtual std::optional<std::uint32_t>
fetchLatestLedgerSequence(boost::asio::yield_context yield) const = 0;
/**
* @brief Fetch the current ledger range.
*
* @return The current ledger range if populated; nullopt otherwise
*/
std::optional<LedgerRange>
fetchLedgerRange() const;
/**
* @brief Updates the range of sequences that are stored in the DB.
*
* @param newMax The new maximum sequence available
*/
void
updateRange(uint32_t newMax);
/**
* @brief Sets the range of sequences that are stored in the DB.
*
* @param min The new minimum sequence available
* @param max The new maximum sequence available
* @param force If set to true, the range will be overwritten even if it was already set
*/
void
setRange(uint32_t min, uint32_t max, bool force = false);
/**
* @brief Fetch the fees from a specific ledger sequence.
*
* @param seq The sequence to fetch for
* @param yield The coroutine context
* @return ripple::Fees if fees are found; nullopt otherwise
*/
std::optional<ripple::Fees>
fetchFees(std::uint32_t seq, boost::asio::yield_context yield) const;
/**
* @brief Fetches a specific transaction.
*
* @param hash The hash of the transaction to fetch
* @param yield The coroutine context
* @return TransactionAndMetadata if transaction is found; nullopt otherwise
*/
virtual std::optional<TransactionAndMetadata>
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches multiple transactions.
*
* @param hashes A vector of hashes to fetch transactions for
* @param yield The coroutine context
* @return A vector of TransactionAndMetadata matching the given hashes
*/
virtual std::vector<TransactionAndMetadata>
fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches all transactions for a specific account.
*
* @param account The account to fetch transactions for
* @param limit The maximum number of transactions per result page
* @param forward Whether to fetch the page forwards or backwards from the given cursor
* @param cursor The cursor to resume fetching from
* @param yield The coroutine context
* @return Results and a cursor to resume from
*/
virtual TransactionsAndCursor
fetchAccountTransactions(
ripple::AccountID const& account,
std::uint32_t limit,
bool forward,
std::optional<TransactionsCursor> const& cursor,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Fetches all transactions from a specific ledger.
*
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return Results as a vector of TransactionAndMetadata
*/
virtual std::vector<TransactionAndMetadata>
fetchAllTransactionsInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches all transaction hashes from a specific ledger.
*
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return Hashes as ripple::uint256 in a vector
*/
virtual std::vector<ripple::uint256>
fetchAllTransactionHashesInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches a specific NFT.
*
* @param tokenID The ID of the NFT
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return NFT object on success; nullopt otherwise
*/
virtual std::optional<NFT>
fetchNFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches all transactions for a specific NFT.
*
* @param tokenID The ID of the NFT
* @param limit The maximum number of transactions per result page
* @param forward Whether to fetch the page forwards or backwards from the given cursor
* @param cursorIn The cursor to resume fetching from
* @param yield The coroutine context
* @return Results and a cursor to resume from
*/
virtual TransactionsAndCursor
fetchNFTTransactions(
ripple::uint256 const& tokenID,
std::uint32_t limit,
bool forward,
std::optional<TransactionsCursor> const& cursorIn,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Fetches all NFTs issued by a given address.
*
* @param issuer AccountID of the issuer you wish to query.
* @param taxon Optional taxon of NFTs by which you wish to filter.
* @param ledgerSequence The ledger sequence to fetch for.
* @param limit Paging limit.
* @param cursorIn Optional cursor to allow us to pick up from where we last left off.
* @param yield Currently executing coroutine.
* @return NFTs issued by this account (or by this issuer/taxon combination if a taxon is passed) and an optional
* cursor to resume from
*/
virtual NFTsAndCursor
fetchNFTsByIssuer(
ripple::AccountID const& issuer,
std::optional<std::uint32_t> const& taxon,
std::uint32_t ledgerSequence,
std::uint32_t limit,
std::optional<ripple::uint256> const& cursorIn,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Fetches a specific ledger object.
*
* Currently the real fetch happens in doFetchLedgerObject and fetchLedgerObject attempts to fetch from Cache first
* and only calls out to the real DB if a cache miss occurred.
*
* @param key The key of the object
* @param sequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The object as a Blob on success; nullopt otherwise
*/
std::optional<Blob>
fetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;
/**
* @brief Fetches all ledger objects by their keys.
*
* Currently the real fetch happens in doFetchLedgerObjects and fetchLedgerObjects attempts to fetch from Cache
* first and only calls out to the real DB for each of the keys that was not found in the cache.
*
* @param keys A vector with the keys of the objects to fetch
* @param sequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return A vector of ledger objects as Blobs
*/
std::vector<Blob>
fetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t sequence,
boost::asio::yield_context yield
) const;
/**
* @brief The database-specific implementation for fetching a ledger object.
*
* @param key The key to fetch for
* @param sequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The object as a Blob on success; nullopt otherwise
*/
virtual std::optional<Blob>
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const = 0;
/**
* @brief The database-specific implementation for fetching ledger objects.
*
* @param keys The keys to fetch for
* @param sequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return A vector of Blobs representing each fetched object
*/
virtual std::vector<Blob>
doFetchLedgerObjects(
std::vector<ripple::uint256> const& keys,
std::uint32_t sequence,
boost::asio::yield_context yield
) const = 0;
/**
* @brief Returns the difference between ledgers.
*
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return A vector of LedgerObject representing the diff
*/
virtual std::vector<LedgerObject>
fetchLedgerDiff(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches a page of ledger objects, ordered by key/index.
*
* @param cursor The cursor to resume fetching from
* @param ledgerSequence The ledger sequence to fetch for
* @param limit The maximum number of transactions per result page
* @param outOfOrder If set to true, the max available sequence is used instead of ledgerSequence
* @param yield The coroutine context
* @return The ledger page
*/
LedgerPage
fetchLedgerPage(
std::optional<ripple::uint256> const& cursor,
std::uint32_t ledgerSequence,
std::uint32_t limit,
bool outOfOrder,
boost::asio::yield_context yield
) const;
/**
* @brief Fetches the successor object.
*
* @param key The key to fetch for
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The successor on success; nullopt otherwise
*/
std::optional<LedgerObject>
fetchSuccessorObject(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;
/**
* @brief Fetches the successor key.
*
* The real fetch happens in doFetchSuccessorKey. This function will attempt to look up the successor in the cache
* first and only if it's not found in the cache will it fetch from the actual DB.
*
* @param key The key to fetch for
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The successor key on success; nullopt otherwise
*/
std::optional<ripple::uint256>
fetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;
/**
* @brief Database-specific implementation of fetching the successor key
*
* @param key The key to fetch for
* @param ledgerSequence The ledger sequence to fetch for
* @param yield The coroutine context
* @return The successor on success; nullopt otherwise
*/
virtual std::optional<ripple::uint256>
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches book offers.
*
* @param book The book to fetch offers for, as an unsigned 256-bit integer.
* @param ledgerSequence The ledger sequence to fetch for
* @param limit Paging limit for how many offers are returned per page.
* @param yield The coroutine context
* @return The book offers page
*/
BookOffersPage
fetchBookOffers(
ripple::uint256 const& book,
std::uint32_t ledgerSequence,
std::uint32_t limit,
boost::asio::yield_context yield
) const;
/**
* @brief Synchronously fetches the ledger range from DB.
*
* This function just wraps hardFetchLedgerRange(boost::asio::yield_context) using synchronous(FnType&&).
*
* @return The ledger range if available; nullopt otherwise
*/
std::optional<LedgerRange>
hardFetchLedgerRange() const;
/**
* @brief Fetches the ledger range from DB.
*
* @return The ledger range if available; nullopt otherwise
*/
virtual std::optional<LedgerRange>
hardFetchLedgerRange(boost::asio::yield_context yield) const = 0;
/**
* @brief Fetches the ledger range from DB retrying until no DatabaseTimeout is thrown.
*
* @return The ledger range if available; nullopt otherwise
*/
std::optional<LedgerRange>
hardFetchLedgerRangeNoThrow() const;
/**
* @brief Writes to a specific ledger.
*
* @param ledgerHeader Ledger header.
* @param blob r-value string serialization of ledger header.
*/
virtual void
writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) = 0;
/**
* @brief Writes a new ledger object.
*
* @param key The key to write the ledger object under
* @param seq The ledger sequence to write for
* @param blob The data to write
*/
virtual void
writeLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob);
/**
* @brief Writes a new transaction.
*
* @param hash The hash of the transaction
* @param seq The ledger sequence to write for
* @param date The timestamp of the entry
* @param transaction The transaction data to write
* @param metadata The metadata to write
*/
virtual void
writeTransaction(
std::string&& hash,
std::uint32_t seq,
std::uint32_t date,
std::string&& transaction,
std::string&& metadata
) = 0;
/**
* @brief Writes NFTs to the database.
*
* @param data A vector of NFTsData objects representing the NFTs
*/
virtual void
writeNFTs(std::vector<NFTsData> const& data) = 0;
/**
* @brief Write a new set of account transactions.
*
* @param data A vector of AccountTransactionsData objects representing the account transactions
*/
virtual void
writeAccountTransactions(std::vector<AccountTransactionsData> data) = 0;
/**
* @brief Write NFTs transactions.
*
* @param data A vector of NFTTransactionsData objects
*/
virtual void
writeNFTTransactions(std::vector<NFTTransactionsData> const& data) = 0;
/**
* @brief Write a new successor.
*
* @param key Key of the object that the passed successor will be the successor for
* @param seq The ledger sequence to write for
* @param successor The successor data to write
*/
virtual void
writeSuccessor(std::string&& key, std::uint32_t seq, std::string&& successor) = 0;
/**
* @brief Starts a write transaction with the DB. No-op for cassandra.
*
* Note: Can potentially be deprecated and removed.
*/
virtual void
startWrites() const = 0;
/**
* @brief Tells database we finished writing all data for a specific ledger.
*
* Uses doFinishWrites to synchronize with the pending writes.
*
* @param ledgerSequence The ledger sequence to finish writing for
* @return true on success; false otherwise
*/
bool
finishWrites(std::uint32_t ledgerSequence);
/**
* @return true if database is overwhelmed; false otherwise
*/
virtual bool
isTooBusy() const = 0;
/**
* @return json object containing backend usage statistics
*/
virtual boost::json::object
stats() const = 0;
private:
virtual void
doWriteLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob) = 0;
virtual bool
doFinishWrites() = 0;
};
} // namespace data
using BackendInterface = data::BackendInterface;

View File

@@ -17,29 +17,38 @@
*/
//==============================================================================
/** @file */
#pragma once
#include <ripple/basics/Log.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/ledger/ReadView.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/TxMeta.h>
#include "util/Assert.hpp"
#include <boost/container/flat_set.hpp>
#include <ripple/basics/Blob.h>
#include <ripple/basics/Log.h>
#include <ripple/basics/StringUtilities.h>
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/Serializer.h>
#include <ripple/protocol/TxMeta.h>
#include <backend/Types.h>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
/// Struct used to keep track of what to write to
/// account_transactions/account_tx tables
struct AccountTransactionsData
{
/**
* @brief Struct used to keep track of what to write to account_transactions/account_tx tables.
*/
struct AccountTransactionsData {
boost::container::flat_set<ripple::AccountID> accounts;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
std::uint32_t ledgerSequence{};
std::uint32_t transactionIndex{};
ripple::uint256 txHash;
AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash, beast::Journal& j)
AccountTransactionsData(ripple::TxMeta& meta, ripple::uint256 const& txHash)
: accounts(meta.getAffectedAccounts())
, ledgerSequence(meta.getLgrSeq())
, transactionIndex(meta.getIndex())
@@ -50,10 +59,12 @@ struct AccountTransactionsData
AccountTransactionsData() = default;
};
/// Represents a link from a tx to an NFT that was targeted/modified/created
/// by it. Gets written to nf_token_transactions table and the like.
struct NFTTransactionsData
{
/**
* @brief Represents a link from a tx to an NFT that was targeted/modified/created by it.
*
* Gets written to nf_token_transactions table and the like.
*/
struct NFTTransactionsData {
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
@@ -65,10 +76,12 @@ struct NFTTransactionsData
}
};
/// Represents an NFT state at a particular ledger. Gets written to nf_tokens
/// table and the like.
struct NFTsData
{
/**
* @brief Represents an NFT state at a particular ledger.
*
* Gets written to nf_tokens table and the like.
*/
struct NFTsData {
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
@@ -100,7 +113,8 @@ struct NFTsData
ripple::uint256 const& tokenID,
ripple::AccountID const& owner,
ripple::Blob const& uri,
ripple::TxMeta const& meta)
ripple::TxMeta const& meta
)
: tokenID(tokenID), ledgerSequence(meta.getLgrSeq()), transactionIndex(meta.getIndex()), owner(owner), uri(uri)
{
}
@@ -126,41 +140,68 @@ struct NFTsData
ripple::uint256 const& tokenID,
std::uint32_t const ledgerSequence,
ripple::AccountID const& owner,
ripple::Blob const& uri)
ripple::Blob const& uri
)
: tokenID(tokenID), ledgerSequence(ledgerSequence), owner(owner), uri(uri)
{
}
};
/**
* @brief Check whether the supplied object is an offer.
*
* @param object The object to check
* @return true if the object is an offer; false otherwise
*/
template <class T>
inline bool
isOffer(T const& object)
{
short offer_bytes = (object[1] << 8) | object[2];
return offer_bytes == 0x006f;
static constexpr short OFFER_OFFSET = 0x006f;
static constexpr short SHIFT = 8;
short offer_bytes = (object[1] << SHIFT) | object[2];
return offer_bytes == OFFER_OFFSET;
}
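As a brief illustration (the byte values below are made up, not taken from a real ledger object): in the canonical serialization the object starts with the 16-bit LedgerEntryType field, so bytes 1 and 2 carry the type code and 0x006f marks an offer:
std::vector<unsigned char> const maybeOffer{0x11, 0x00, 0x6f};  // 0x11 = LedgerEntryType field header, 0x006f = ltOFFER
bool const offer = isOffer(maybeOffer);                         // true: (object[1] << 8) | object[2] == 0x006f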
/**
* @brief Check whether the supplied hex represents an offer object.
*
* @param object The object to check
* @return true if the object is an offer; false otherwise
*/
template <class T>
inline bool
isOfferHex(T const& object)
{
auto blob = ripple::strUnHex(4, object.begin(), object.begin() + 4);
if (blob)
{
short offer_bytes = ((*blob)[1] << 8) | (*blob)[2];
return offer_bytes == 0x006f;
}
return isOffer(*blob);
return false;
}
/**
* @brief Check whether the supplied object is a dir node.
*
* @param object The object to check
* @return true if the object is a dir node; false otherwise
*/
template <class T>
inline bool
isDirNode(T const& object)
{
short spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == 0x0064;
static constexpr short DIR_NODE_SPACE_KEY = 0x0064;
short const spaceKey = (object.data()[1] << 8) | object.data()[2];
return spaceKey == DIR_NODE_SPACE_KEY;
}
/**
* @brief Check whether the supplied object is a book dir.
*
* @param key The key into the object
* @param object The object to check
* @return true if the object is a book dir; false otherwise
*/
template <class T, class R>
inline bool
isBookDir(T const& key, R const& object)
@@ -172,55 +213,55 @@ isBookDir(T const& key, R const& object)
return !sle[~ripple::sfOwner].has_value();
}
/**
* @brief Get the book out of an offer object.
*
* @param offer The offer to get the book for
* @return Book as ripple::uint256
*/
template <class T>
inline ripple::uint256
getBook(T const& offer)
{
ripple::SerialIter it{offer.data(), offer.size()};
ripple::SLE sle{it, {}};
ripple::SLE const sle{it, {}};
ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);
return book;
}
/**
* @brief Get the book base.
*
* @param key The key to get the book base out of
* @return Book base as ripple::uint256
*/
template <class T>
inline ripple::uint256
getBookBase(T const& key)
{
assert(key.size() == ripple::uint256::size());
static constexpr size_t KEY_SIZE = 24;
ASSERT(key.size() == ripple::uint256::size(), "Invalid key size {}", key.size());
ripple::uint256 ret;
for (size_t i = 0; i < 24; ++i)
{
for (size_t i = 0; i < KEY_SIZE; ++i)
ret.data()[i] = key.data()[i];
}
return ret;
}
inline ripple::LedgerInfo
deserializeHeader(ripple::Slice data)
{
ripple::SerialIter sit(data.data(), data.size());
ripple::LedgerInfo info;
info.seq = sit.get32();
info.drops = sit.get64();
info.parentHash = sit.get256();
info.txHash = sit.get256();
info.accountHash = sit.get256();
info.parentCloseTime = ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
info.closeTime = ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
info.closeTimeResolution = ripple::NetClock::duration{sit.get8()};
info.closeFlags = sit.get8();
info.hash = sit.get256();
return info;
}
/**
* @brief Stringify a ripple::uint256.
*
* @param input The input value
* @return The input value as a string
*/
inline std::string
uint256ToString(ripple::uint256 const& uint)
uint256ToString(ripple::uint256 const& input)
{
return {reinterpret_cast<const char*>(uint.data()), uint.size()};
return {reinterpret_cast<char const*>(input.data()), ripple::uint256::size()};
}
/** @brief The ripple epoch start timestamp. Midnight on 1st January 2000. */
static constexpr std::uint32_t rippleEpochStart = 946684800;

View File

@@ -17,74 +17,97 @@
*/
//==============================================================================
#include <backend/SimpleCache.h>
namespace Backend {
#include "data/LedgerCache.hpp"
#include "data/Types.hpp"
#include "util/Assert.hpp"
#include <ripple/basics/base_uint.h>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <vector>
namespace data {
uint32_t
SimpleCache::latestLedgerSequence() const
LedgerCache::latestLedgerSequence() const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
return latestSeq_;
}
void
SimpleCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground)
LedgerCache::waitUntilCacheContainsSeq(uint32_t seq)
{
if (disabled_)
return;
std::unique_lock lock(mtx_);
cv_.wait(lock, [this, seq] { return latestSeq_ >= seq; });
return;
}
void
LedgerCache::update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground)
{
if (disabled_)
return;
{
std::scoped_lock lck{mtx_};
if (seq > latestSeq_)
{
assert(seq == latestSeq_ + 1 || latestSeq_ == 0);
std::scoped_lock const lck{mtx_};
if (seq > latestSeq_) {
ASSERT(
seq == latestSeq_ + 1 || latestSeq_ == 0,
"New sequense must be either next or first. seq = {}, latestSeq_ = {}",
seq,
latestSeq_
);
latestSeq_ = seq;
}
for (auto const& obj : objs)
{
if (obj.blob.size())
{
if (isBackground && deletes_.count(obj.key))
for (auto const& obj : objs) {
if (!obj.blob.empty()) {
if (isBackground && deletes_.contains(obj.key))
continue;
auto& e = map_[obj.key];
if (seq > e.seq)
{
if (seq > e.seq) {
e = {seq, obj.blob};
}
}
else
{
} else {
map_.erase(obj.key);
if (!full_ && !isBackground)
deletes_.insert(obj.key);
}
}
cv_.notify_all();
}
}
std::optional<LedgerObject>
SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock{mtx_};
successorReqCounter_++;
std::shared_lock const lck{mtx_};
++successorReqCounter_.get();
if (seq != latestSeq_)
return {};
auto e = map_.upper_bound(key);
if (e == map_.end())
return {};
successorHitCounter_++;
++successorHitCounter_.get();
return {{e->first, e->second.blob}};
}
std::optional<LedgerObject>
SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
if (!full_)
return {};
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
if (seq != latestSeq_)
return {};
auto e = map_.lower_bound(key);
@@ -93,62 +116,67 @@ SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
--e;
return {{e->first, e->second.blob}};
}
std::optional<Blob>
SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
LedgerCache::get(ripple::uint256 const& key, uint32_t seq) const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
if (seq > latestSeq_)
return {};
objectReqCounter_++;
++objectReqCounter_.get();
auto e = map_.find(key);
if (e == map_.end())
return {};
if (seq < e->second.seq)
return {};
objectHitCounter_++;
++objectHitCounter_.get();
return {e->second.blob};
}
void
SimpleCache::setDisabled()
LedgerCache::setDisabled()
{
disabled_ = true;
}
void
SimpleCache::setFull()
LedgerCache::setFull()
{
if (disabled_)
return;
full_ = true;
std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
deletes_.clear();
}
bool
SimpleCache::isFull() const
LedgerCache::isFull() const
{
return full_;
}
size_t
SimpleCache::size() const
LedgerCache::size() const
{
std::shared_lock lck{mtx_};
std::shared_lock const lck{mtx_};
return map_.size();
}
float
SimpleCache::getObjectHitRate() const
LedgerCache::getObjectHitRate() const
{
if (!objectReqCounter_)
if (objectReqCounter_.get().value() == 0u)
return 1;
return ((float)objectHitCounter_) / objectReqCounter_;
return static_cast<float>(objectHitCounter_.get().value()) / objectReqCounter_.get().value();
}
float
SimpleCache::getSuccessorHitRate() const
LedgerCache::getSuccessorHitRate() const
{
if (!successorReqCounter_)
if (successorReqCounter_.get().value() == 0u)
return 1;
return ((float)successorHitCounter_) / successorReqCounter_;
return static_cast<float>(successorHitCounter_.get().value()) / successorReqCounter_.get().value();
}
} // namespace Backend
} // namespace data

src/data/LedgerCache.hpp (new file)
View File

@@ -0,0 +1,180 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2022, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/Types.hpp"
#include "util/prometheus/Counter.hpp"
#include "util/prometheus/Label.hpp"
#include "util/prometheus/Prometheus.hpp"
#include <ripple/basics/base_uint.h>
#include <ripple/basics/hardened_hash.h>
#include <atomic>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <optional>
#include <shared_mutex>
#include <unordered_set>
#include <vector>
namespace data {
/**
* @brief Cache for an entire ledger.
*/
class LedgerCache {
struct CacheEntry {
uint32_t seq = 0;
Blob blob;
};
// counters for fetchLedgerObject(s) hit rate
std::reference_wrapper<util::prometheus::CounterInt> objectReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "ledger_objects"}}),
"LedgerCache statistics"
)};
std::reference_wrapper<util::prometheus::CounterInt> objectHitCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "ledger_objects"}})
)};
// counters for fetchSuccessorKey hit rate
std::reference_wrapper<util::prometheus::CounterInt> successorReqCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "request"}, {"fetch", "successor_key"}}),
"ledgerCache"
)};
std::reference_wrapper<util::prometheus::CounterInt> successorHitCounter_{PrometheusService::counterInt(
"ledger_cache_counter_total_number",
util::prometheus::Labels({{"type", "cache_hit"}, {"fetch", "successor_key"}})
)};
std::map<ripple::uint256, CacheEntry> map_;
mutable std::shared_mutex mtx_;
std::condition_variable_any cv_;
uint32_t latestSeq_ = 0;
std::atomic_bool full_ = false;
std::atomic_bool disabled_ = false;
// Temporary set to prevent the background thread from writing already deleted data. Not used when the cache is full.
std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
public:
/**
* @brief Update the cache with new ledger objects.
*
* @param blobs The ledger objects to update cache with
* @param seq The sequence to update cache for
* @param isBackground Should be set to true when writing old data from a background thread
*/
void
update(std::vector<LedgerObject> const& objs, uint32_t seq, bool isBackground = false);
/**
* @brief Fetch a cached object by its key and sequence number.
*
* @param key The key to fetch for
* @param seq The sequence to fetch for
* @return If found in cache, will return the cached Blob; otherwise nullopt is returned
*/
std::optional<Blob>
get(ripple::uint256 const& key, uint32_t seq) const;
/**
* @brief Gets a cached successor.
*
* Note: This function always returns std::nullopt when @ref isFull() returns false.
*
* @param key The key to fetch for
* @param seq The sequence to fetch for
* @return If found in cache, will return the cached successor; otherwise nullopt is returned
*/
std::optional<LedgerObject>
getSuccessor(ripple::uint256 const& key, uint32_t seq) const;
/**
* @brief Gets a cached predecessor.
*
* Note: This function always returns std::nullopt when @ref isFull() returns false.
*
* @param key The key to fetch for
* @param seq The sequence to fetch for
* @return If found in cache, will return the cached predecessor; otherwise nullopt is returned
*/
std::optional<LedgerObject>
getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
/**
* @brief Disables the cache.
*/
void
setDisabled();
/**
* @brief Sets the full flag to true.
*
* This is used when the cache is loaded in its entirety at application startup. The data can either be loaded from the
* DB, populated together with the initial ledger download (on first run), or downloaded from a peer node (specified in
* the config).
*/
void
setFull();
/**
* @return The latest ledger sequence for which the cache is available.
*/
uint32_t
latestLedgerSequence() const;
/**
* @return true if the cache has all data for the most recent ledger; false otherwise
*/
bool
isFull() const;
/**
* @return The total size of the cache.
*/
size_t
size() const;
/**
* @return A number representing the success rate of hitting an object in the cache versus missing it.
*/
float
getObjectHitRate() const;
/**
* @return A number representing the success rate of hitting a successor in the cache versus missing it.
*/
float
getSuccessorHitRate() const;
void
waitUntilCacheContainsSeq(uint32_t seq);
};
} // namespace data
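A rough usage sketch of the cache above (the diff vector, the sequence and the key are placeholders, not real ETL code):
data::LedgerCache cache;

std::vector<data::LedgerObject> const diff = /* objects of the newly published ledger */ {};
uint32_t const seq = 42;                 // placeholder sequence
cache.update(diff, seq);                 // live update; pass isBackground = true from the background loader
cache.setFull();                         // once the entire ledger state has been loaded

ripple::uint256 const key{};             // placeholder key
if (auto const blob = cache.get(key, seq); blob) {
    // cache hit: *blob holds the object's data as of `seq`
}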

View File

@@ -1,4 +1,5 @@
# Clio Backend
# Backend
## Background
The backend of Clio is responsible for reading and writing past ledger data from and to a given database. At the moment, Cassandra and ScyllaDB are the only production-ready supported databases. Support for other databases can be added by creating a new implementation of the virtual methods of `BackendInterface.h`, and then extending `BackendFactory.h` (which follows the Factory Object Design Pattern) so that it returns the new database interface for the corresponding `type` in Clio's configuration file, as in the sketch below.
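For example, wiring in a new database type might look roughly like the following (the `MyNewBackend` class and the exact factory shape shown here are illustrative placeholders, not the real `BackendFactory.h` code):

```cpp
// Illustrative sketch only; names and signatures are placeholders.
std::shared_ptr<BackendInterface>
makeBackend(std::string const& type)
{
    if (type == "cassandra")
        return std::make_shared<CassandraBackend>(/* settings */);
    if (type == "mynewdb")  // hypothetical new database type
        return std::make_shared<MyNewBackend>(/* settings */);  // MyNewBackend implements BackendInterface
    throw std::runtime_error("Unknown database type: " + type);
}
```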

View File

@@ -21,51 +21,60 @@
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <vector>
namespace Backend {
// *** return types
namespace data {
using Blob = std::vector<unsigned char>;
struct LedgerObject
{
/**
* @brief Represents an object in the ledger.
*/
struct LedgerObject {
ripple::uint256 key;
Blob blob;
bool
operator==(const LedgerObject& other) const
operator==(LedgerObject const& other) const
{
return key == other.key && blob == other.blob;
}
};
struct LedgerPage
{
/**
* @brief Represents a page of LedgerObjects.
*/
struct LedgerPage {
std::vector<LedgerObject> objects;
std::optional<ripple::uint256> cursor;
};
struct BookOffersPage
{
/**
* @brief Represents a page of book offer objects.
*/
struct BookOffersPage {
std::vector<LedgerObject> offers;
std::optional<ripple::uint256> cursor;
};
struct TransactionAndMetadata
{
/**
* @brief Represents a transaction and its metadata bundled together.
*/
struct TransactionAndMetadata {
Blob transaction;
Blob metadata;
std::uint32_t ledgerSequence = 0;
std::uint32_t date = 0;
TransactionAndMetadata() = default;
TransactionAndMetadata(
Blob const& transaction,
Blob const& metadata,
std::uint32_t ledgerSequence,
std::uint32_t date)
: transaction{transaction}, metadata{metadata}, ledgerSequence{ledgerSequence}, date{date}
TransactionAndMetadata(Blob transaction, Blob metadata, std::uint32_t ledgerSequence, std::uint32_t date)
: transaction{std::move(transaction)}, metadata{std::move(metadata)}, ledgerSequence{ledgerSequence}, date{date}
{
}
@@ -78,17 +87,19 @@ struct TransactionAndMetadata
}
bool
operator==(const TransactionAndMetadata& other) const
operator==(TransactionAndMetadata const& other) const
{
return transaction == other.transaction && metadata == other.metadata &&
ledgerSequence == other.ledgerSequence && date == other.date;
}
};
struct TransactionsCursor
{
std::uint32_t ledgerSequence;
std::uint32_t transactionIndex;
/**
* @brief Represents a cursor into the transactions table.
*/
struct TransactionsCursor {
std::uint32_t ledgerSequence = 0;
std::uint32_t transactionIndex = 0;
TransactionsCursor() = default;
TransactionsCursor(std::uint32_t ledgerSequence, std::uint32_t transactionIndex)
@@ -101,9 +112,6 @@ struct TransactionsCursor
{
}
TransactionsCursor&
operator=(TransactionsCursor const&) = default;
bool
operator==(TransactionsCursor const& other) const = default;
@@ -114,27 +122,31 @@ struct TransactionsCursor
}
};
struct TransactionsAndCursor
{
/**
* @brief Represents a bundle of transactions with metadata and a cursor to the next page.
*/
struct TransactionsAndCursor {
std::vector<TransactionAndMetadata> txns;
std::optional<TransactionsCursor> cursor;
};
struct NFT
{
/**
* @brief Represents an NFToken.
*/
struct NFT {
ripple::uint256 tokenID;
std::uint32_t ledgerSequence;
std::uint32_t ledgerSequence{};
ripple::AccountID owner;
Blob uri;
bool isBurned;
bool isBurned{};
NFT() = default;
NFT(ripple::uint256 const& tokenID,
std::uint32_t ledgerSequence,
ripple::AccountID const& owner,
Blob const& uri,
Blob uri,
bool isBurned)
: tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{uri}, isBurned{isBurned}
: tokenID{tokenID}, ledgerSequence{ledgerSequence}, owner{owner}, uri{std::move(uri)}, isBurned{isBurned}
{
}
@@ -143,9 +155,8 @@ struct NFT
{
}
// clearly two tokens are the same if they have the same ID, but this
// struct stores the state of a given token at a given ledger sequence, so
// we also need to compare with ledgerSequence
// clearly two tokens are the same if they have the same ID, but this struct stores the state of a given token at a
// given ledger sequence, so we also need to compare with ledgerSequence.
bool
operator==(NFT const& other) const
{
@@ -153,12 +164,21 @@ struct NFT
}
};
struct LedgerRange
{
std::uint32_t minSequence;
std::uint32_t maxSequence;
struct NFTsAndCursor {
std::vector<NFT> nfts;
std::optional<ripple::uint256> cursor;
};
/**
* @brief Stores a range of sequences as a min and max pair.
*/
struct LedgerRange {
std::uint32_t minSequence = 0;
std::uint32_t maxSequence = 0;
};
constexpr ripple::uint256 firstKey{"0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 lastKey{"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 hi192{"0000000000000000000000000000000000000000000000001111111111111111"};
} // namespace Backend
} // namespace data

View File

@@ -0,0 +1,131 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/cassandra/Types.hpp"
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <boost/json/object.hpp>
#include <chrono>
#include <concepts>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
namespace data::cassandra {
/**
* @brief The requirements of a settings provider.
*/
template <typename T>
concept SomeSettingsProvider = requires(T a) {
{
a.getSettings()
} -> std::same_as<Settings>;
{
a.getKeyspace()
} -> std::same_as<std::string>;
{
a.getTablePrefix()
} -> std::same_as<std::optional<std::string>>;
{
a.getReplicationFactor()
} -> std::same_as<uint16_t>;
{
a.getTtl()
} -> std::same_as<uint16_t>;
};
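For instance, a minimal stub provider for tests could satisfy this concept as follows (purely illustrative; no such class exists in the codebase):
struct StubSettingsProvider {
    Settings getSettings() const { return Settings::defaultSettings(); }
    std::string getKeyspace() const { return "clio"; }
    std::optional<std::string> getTablePrefix() const { return std::nullopt; }
    uint16_t getReplicationFactor() const { return 3; }
    uint16_t getTtl() const { return 0; }
};
static_assert(SomeSettingsProvider<StubSettingsProvider>);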
/**
* @brief The requirements of an execution strategy.
*/
template <typename T>
concept SomeExecutionStrategy = requires(
T a,
Settings settings,
Handle handle,
Statement statement,
std::vector<Statement> statements,
PreparedStatement prepared,
boost::asio::yield_context token
) {
{
T(settings, handle)
};
{
a.sync()
} -> std::same_as<void>;
{
a.isTooBusy()
} -> std::same_as<bool>;
{
a.writeSync(statement)
} -> std::same_as<ResultOrError>;
{
a.writeSync(prepared)
} -> std::same_as<ResultOrError>;
{
a.write(prepared)
} -> std::same_as<void>;
{
a.write(std::move(statements))
} -> std::same_as<void>;
{
a.read(token, prepared)
} -> std::same_as<ResultOrError>;
{
a.read(token, statement)
} -> std::same_as<ResultOrError>;
{
a.read(token, statements)
} -> std::same_as<ResultOrError>;
{
a.readEach(token, statements)
} -> std::same_as<std::vector<Result>>;
{
a.stats()
} -> std::same_as<boost::json::object>;
};
/**
* @brief The requirements of a retry policy.
*/
template <typename T>
concept SomeRetryPolicy = requires(T a, boost::asio::io_context ioc, CassandraError err, uint32_t attempt) {
{
T(ioc)
};
{
a.shouldRetry(err)
} -> std::same_as<bool>;
{
a.retry([]() {})
} -> std::same_as<void>;
{
a.calculateDelay(attempt)
} -> std::same_as<std::chrono::milliseconds>;
};
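Similarly, a no-op policy could satisfy SomeRetryPolicy like this (again a hypothetical test stub, not the real retry policy shipped with Clio):
struct StubRetryPolicy {
    explicit StubRetryPolicy(boost::asio::io_context&) {}

    bool shouldRetry(CassandraError const&) const { return false; }

    void retry(auto&& fn) { fn(); }  // run immediately instead of scheduling

    std::chrono::milliseconds calculateDelay(uint32_t attempt) const { return std::chrono::milliseconds{attempt}; }
};
static_assert(SomeRetryPolicy<StubRetryPolicy>);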
} // namespace data::cassandra

View File

@@ -21,34 +21,38 @@
#include <cassandra.h>
#include <cstdint>
#include <ostream>
#include <string>
#include <utility>
namespace Backend::Cassandra {
namespace data::cassandra {
/**
* @brief A simple container for both error message and error code
* @brief A simple container for both error message and error code.
*/
class CassandraError
{
class CassandraError {
std::string message_;
uint32_t code_;
uint32_t code_{};
public:
CassandraError() = default; // default constructible required by Expected
CassandraError(std::string message, uint32_t code) : message_{message}, code_{code}
CassandraError(std::string message, uint32_t code) : message_{std::move(message)}, code_{code}
{
}
template <typename T>
friend std::string
operator+(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
operator+(T const& lhs, CassandraError const& rhs)
requires std::is_convertible_v<T, std::string>
{
return lhs + rhs.message();
}
template <typename T>
friend bool
operator==(T const& lhs, CassandraError const& rhs) requires std::is_convertible_v<T, std::string>
operator==(T const& lhs, CassandraError const& rhs)
requires std::is_convertible_v<T, std::string>
{
return lhs == rhs.message();
}
@@ -67,28 +71,38 @@ public:
return os;
}
/**
* @return The final error message as a std::string
*/
std::string
message() const
{
return message_;
}
/**
* @return The error code
*/
uint32_t
code() const
{
return code_;
}
/**
* @return true if the wrapped error is considered a timeout; false otherwise
*/
bool
isTimeout() const
{
if (code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
return code_ == CASS_ERROR_LIB_NO_HOSTS_AVAILABLE or code_ == CASS_ERROR_LIB_REQUEST_TIMED_OUT or
code_ == CASS_ERROR_SERVER_UNAVAILABLE or code_ == CASS_ERROR_SERVER_OVERLOADED or
code_ == CASS_ERROR_SERVER_READ_TIMEOUT)
return true;
return false;
code_ == CASS_ERROR_SERVER_READ_TIMEOUT;
}
/**
* @return true if the wrapped error is an invalid query; false otherwise
*/
bool
isInvalidQuery() const
{
@@ -96,4 +110,4 @@ public:
}
};
} // namespace Backend::Cassandra
} // namespace data::cassandra

View File

@@ -17,9 +17,20 @@
*/
//==============================================================================
#include <backend/cassandra/Handle.h>
#include "data/cassandra/Handle.hpp"
namespace Backend::Cassandra {
#include "data/cassandra/Types.hpp"
#include <cassandra.h>
#include <functional>
#include <stdexcept>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
namespace data::cassandra {
Handle::Handle(Settings clusterSettings) : cluster_{clusterSettings}
{
@@ -88,17 +99,17 @@ std::vector<Handle::FutureType>
Handle::asyncExecuteEach(std::vector<Statement> const& statements) const
{
std::vector<Handle::FutureType> futures;
futures.reserve(statements.size());
for (auto const& statement : statements)
futures.push_back(cass_session_execute(session_, statement));
futures.emplace_back(cass_session_execute(session_, statement));
return futures;
}
Handle::MaybeErrorType
Handle::executeEach(std::vector<Statement> const& statements) const
{
for (auto futures = asyncExecuteEach(statements); auto const& future : futures)
{
if (auto const rc = future.await(); not rc)
for (auto futures = asyncExecuteEach(statements); auto const& future : futures) {
if (auto rc = future.await(); not rc)
return rc;
}
@@ -145,11 +156,12 @@ Handle::asyncExecute(std::vector<Statement> const& statements, std::function<voi
Handle::PreparedStatementType
Handle::prepare(std::string_view query) const
{
Handle::FutureType future = cass_session_prepare(session_, query.data());
if (auto const rc = future.await(); rc)
Handle::FutureType const future = cass_session_prepare(session_, query.data());
auto const rc = future.await();
if (rc)
return cass_future_get_prepared(future);
else
throw std::runtime_error(rc.error().message());
throw std::runtime_error(rc.error().message());
}
} // namespace Backend::Cassandra
} // namespace data::cassandra

View File

@@ -19,31 +19,28 @@
#pragma once
#include <backend/cassandra/Error.h>
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/Batch.h>
#include <backend/cassandra/impl/Cluster.h>
#include <backend/cassandra/impl/Future.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include <backend/cassandra/impl/Result.h>
#include <backend/cassandra/impl/Session.h>
#include <backend/cassandra/impl/Statement.h>
#include <util/Expected.h>
#include "data/cassandra/Error.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/Batch.hpp"
#include "data/cassandra/impl/Cluster.hpp"
#include "data/cassandra/impl/Future.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/Result.hpp"
#include "data/cassandra/impl/Session.hpp"
#include "data/cassandra/impl/Statement.hpp"
#include <cassandra.h>
#include <chrono>
#include <compare>
#include <iterator>
#include <functional>
#include <string_view>
#include <vector>
namespace Backend::Cassandra {
namespace data::cassandra {
/**
* @brief Represents a handle to the cassandra database cluster
*/
class Handle
{
class Handle {
detail::Cluster cluster_;
detail::Session session_;
@@ -57,28 +54,31 @@ public:
using ResultType = Result;
/**
* @brief Construct a new handle from a @ref Settings object
* @brief Construct a new handle from a @ref detail::Settings object.
*
* @param clusterSettings The settings to use
*/
explicit Handle(Settings clusterSettings = Settings::defaultSettings());
/**
* @brief Construct a new handle with default settings and only by setting
* the contact points
* @brief Construct a new handle with default settings, setting only the contact points.
*
* @param contactPoints The contact points to use instead of settings
*/
explicit Handle(std::string_view contactPoints);
/**
* @brief Disconnects gracefully if possible
* @brief Disconnects gracefully if possible.
*/
~Handle();
/**
* @brief Move is supported
* @brief Move is supported.
*/
Handle(Handle&&) = default;
/**
* @brief Connect to the cluster asynchronously
* @brief Connect to the cluster asynchronously.
*
* @return A future
*/
@@ -86,31 +86,37 @@ public:
asyncConnect() const;
/**
* @brief Synchronous version of the above
* @brief Synchronous version of the above.
*
* See @ref asyncConnect() const for how this works.
*
* @return Possibly an error
*/
[[nodiscard]] MaybeErrorType
connect() const;
/**
* @brief Connect to the specified keyspace asynchronously
* @brief Connect to the specified keyspace asynchronously.
*
* @param keyspace The keyspace to use
* @return A future
*/
[[nodiscard]] FutureType
asyncConnect(std::string_view keyspace) const;
/**
* @brief Synchronous version of the above
* @brief Synchronous version of the above.
*
* See @ref asyncConnect(std::string_view) const for how this works.
*
* @param keyspace The keyspace to use
* @return Possibly an error
*/
[[nodiscard]] MaybeErrorType
connect(std::string_view keyspace) const;
/**
* @brief Disconnect from the cluster asynchronously
* @brief Disconnect from the cluster asynchronously.
*
* @return A future
*/
@@ -118,32 +124,40 @@ public:
asyncDisconnect() const;
/**
* @brief Synchronous version of the above
* @brief Synchronous version of the above.
*
* See @ref asyncDisconnect() const for how this works.
*
* @return Possibly an error
*/
[[maybe_unused]] MaybeErrorType
disconnect() const;
/**
* @brief Reconnect to the specified keyspace asynchronously
* @brief Reconnect to the specified keyspace asynchronously.
*
* @param keyspace The keyspace to use
* @return A future
*/
[[nodiscard]] FutureType
asyncReconnect(std::string_view keyspace) const;
/**
* @brief Synchronous version of the above
* @brief Synchronous version of the above.
*
* See @ref asyncReconnect(std::string_view) const for how this works.
*
* @param keyspace The keyspace to use
* @return Possibly an error
*/
[[nodiscard]] MaybeErrorType
reconnect(std::string_view keyspace) const;
/**
* @brief Execute a simple query with optional args asynchronously
* @brief Execute a simple query with optional args asynchronously.
*
* @param query The query to execute
* @param args The arguments to bind for execution
* @return A future
*/
template <typename... Args>
@@ -155,10 +169,13 @@ public:
}
/**
* @brief Synchronous version of the above
* @brief Synchronous version of the above.
*
* See @ref asyncExecute(std::string_view, Args&&...) const for how this
* works.
* See asyncExecute(std::string_view, Args&&...) const for how this works.
*
* @param query The query to execute
* @param args The arguments to bind for execution
* @return The result or an error
*/
template <typename... Args>
[[maybe_unused]] ResultOrErrorType
@@ -168,30 +185,34 @@ public:
}
/**
* @brief Execute each of the statements asynchronously
* @brief Execute each of the statements asynchronously.
*
* Batched version is not always the right option. Especially since it only
* supports INSERT, UPDATE and DELETE statements.
* This can be used as an alternative when statements need to execute in
* bulk.
* The batched version is not always the right option, especially since it only supports INSERT, UPDATE and DELETE
* statements.
* This can be used as an alternative when statements need to execute in bulk.
*
* @param statements The statements to execute
* @return A vector of future objects
*/
[[nodiscard]] std::vector<FutureType>
asyncExecuteEach(std::vector<StatementType> const& statements) const;
/**
* @brief Synchronous version of the above
* @brief Synchronous version of the above.
*
* See @ref asyncExecuteEach(std::vector<StatementType> const&) const for
* how this works.
* See @ref asyncExecuteEach(std::vector<StatementType> const&) const for how this works.
*
* @param statements The statements to execute
* @return Possibly an error
*/
[[maybe_unused]] MaybeErrorType
executeEach(std::vector<StatementType> const& statements) const;
/**
* @brief Execute a prepared statement with optional args asynchronously
* @brief Execute a prepared statement with optional args asynchronously.
*
* @param statement The prepared statement to execute
* @param args The arguments to bind for execution
* @return A future
*/
template <typename... Args>
@@ -203,10 +224,13 @@ public:
}
/**
* @brief Synchronous version of the above
* @brief Synchronous version of the above.
*
* See @ref asyncExecute(std::vector<StatementType> const&, Args&&...) const
* for how this works.
* See asyncExecute(std::vector<StatementType> const&, Args&&...) const for how this works.
*
* @param statement The prepared statement to bind and execute
* @param args The arguments to bind for execution
* @return The result or an error
*/
template <typename... Args>
[[maybe_unused]] ResultOrErrorType
@@ -216,61 +240,70 @@ public:
}
/**
* @brief Execute one (bound or simple) statement asynchronously
* @brief Execute one (bound or simple) statement asynchronously.
*
* @param statement The statement to execute
* @return A future
*/
[[nodiscard]] FutureType
asyncExecute(StatementType const& statement) const;
/**
* @brief Execute one (bound or simple) statement asynchronously with a
* callback
* @brief Execute one (bound or simple) statement asynchronously with a callback.
*
* @param statement The statement to execute
* @param cb The callback to execute when data is ready
* @return A future that holds onto the callback provided
*/
[[nodiscard]] FutureWithCallbackType
asyncExecute(StatementType const& statement, std::function<void(ResultOrErrorType)>&& cb) const;
/**
* @brief Synchronous version of the above
* @brief Synchronous version of the above.
*
* See @ref asyncExecute(StatementType const&) const for how this
* works.
* See @ref asyncExecute(StatementType const&) const for how this works.
*
* @param statement The statement to execute
* @return The result or an error
*/
[[maybe_unused]] ResultOrErrorType
execute(StatementType const& statement) const;
/**
* @brief Execute a batch of (bound or simple) statements asynchronously
* @brief Execute a batch of (bound or simple) statements asynchronously.
*
* @param statements The statements to execute
* @return A future
*/
[[nodiscard]] FutureType
asyncExecute(std::vector<StatementType> const& statements) const;
/**
* @brief Synchronous version of the above
* @brief Synchronous version of the above.
*
* See @ref asyncExecute(std::vector<StatementType> const&) const for how
* this works.
* See @ref asyncExecute(std::vector<StatementType> const&) const for how this works.
*
* @param statements The statements to execute
* @return Possibly an error
*/
[[maybe_unused]] MaybeErrorType
execute(std::vector<StatementType> const& statements) const;
/**
* @brief Execute a batch of (bound or simple) statements asynchronously
* with a completion callback
* @brief Execute a batch of (bound or simple) statements asynchronously with a completion callback.
*
* @param statements The statements to execute
* @param cb The callback to execute when data is ready
* @return A future that holds onto the callback provided
*/
[[nodiscard]] FutureWithCallbackType
asyncExecute(std::vector<StatementType> const& statements, std::function<void(ResultOrErrorType)>&& cb) const;
/**
* @brief Prepare a statement
* @brief Prepare a statement.
*
* @return A @ref PreparedStatementType
* @param query The query to prepare
* @return A prepared statement
* @throws std::runtime_error with underlying error description on failure
*/
[[nodiscard]] PreparedStatementType
@@ -278,12 +311,13 @@ public:
};
/**
* @brief Extracts the results into series of std::tuple<Types...> by creating a
* simple wrapper with an STL input iterator inside.
* @brief Extracts the results into a series of std::tuple<Types...> by creating a simple wrapper with an STL input
* iterator inside.
*
* You can call .begin() and .end() in order to iterate as usual.
* This also means that you can use it in a range-based for or with some
* algorithms.
* This also means that you can use it in a range-based for or with some algorithms.
*
* @param result The result to iterate
*/
template <typename... Types>
[[nodiscard]] detail::ResultExtractor<Types...>
@@ -292,4 +326,4 @@ extract(Handle::ResultType const& result)
return {result};
}
} // namespace Backend::Cassandra
} // namespace data::cassandra
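For illustration, iterating a result might look like this (a sketch; the column layout and types are assumptions, and `result` stands for a Handle::ResultType obtained from a successful execute() call):
// Assumes the query selected a text hash and a bigint sequence per row.
for (auto [hash, seq] : data::cassandra::extract<std::string, int64_t>(result))
    processLedger(hash, seq);  // processLedger is a placeholder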

View File

@@ -19,17 +19,20 @@
#pragma once
#include <backend/cassandra/Concepts.h>
#include <backend/cassandra/Handle.h>
#include <backend/cassandra/SettingsProvider.h>
#include <backend/cassandra/Types.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <util/Expected.h>
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "util/log/Logger.hpp"
#include <fmt/compile.h>
namespace Backend::Cassandra {
#include <functional>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
namespace data::cassandra {
template <SomeSettingsProvider SettingsProviderType>
[[nodiscard]] std::string inline qualifiedTableName(SettingsProviderType const& provider, std::string_view name)
@@ -38,17 +41,11 @@ template <SomeSettingsProvider SettingsProviderType>
}
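A hedged illustration of what qualifiedTableName() produces; the settings provider variable and the keyspace/prefix values are made up for the example.

    // With keyspace "clio" and no table_prefix this yields something like "clio.objects";
    // with a table_prefix of "dev_" it would yield "clio.dev_objects" (illustrative values).
    auto const table = qualifiedTableName(settingsProvider, "objects");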
/**
* @brief Manages the DB schema and provides access to prepared statements
* @brief Manages the DB schema and provides access to prepared statements.
*/
template <SomeSettingsProvider SettingsProviderType>
class Schema
{
// Current schema version.
// Update this everytime you update the schema.
// Migrations will be ran automatically based on this value.
static constexpr uint16_t version = 1u;
clio::Logger log_{"Backend"};
class Schema {
util::Logger log_{"Backend"};
std::reference_wrapper<SettingsProviderType const> settingsProvider_;
public:
@@ -67,7 +64,8 @@ public:
AND durable_writes = true
)",
settingsProvider_.get().getKeyspace(),
settingsProvider_.get().getReplicationFactor());
settingsProvider_.get().getReplicationFactor()
);
}();
// =======================
@@ -90,7 +88,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "objects"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -105,7 +104,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "transactions"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -118,7 +118,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_transactions"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -132,7 +133,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "successor"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -145,7 +147,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "diff"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -160,7 +163,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "account_tx"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -172,7 +176,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "ledgers"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -184,7 +189,8 @@ public:
WITH default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -194,7 +200,8 @@ public:
sequence bigint
)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
statements.emplace_back(fmt::format(
R"(
@@ -210,7 +217,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -225,7 +233,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -240,7 +249,8 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
statements.emplace_back(fmt::format(
R"(
@@ -255,16 +265,16 @@ public:
AND default_time_to_live = {}
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions"),
settingsProvider_.get().getTtl()));
settingsProvider_.get().getTtl()
));
return statements;
}();
/**
* @brief Prepared statements holder
* @brief Prepared statements holder.
*/
class Statements
{
class Statements {
std::reference_wrapper<SettingsProviderType const> settingsProvider_;
std::reference_wrapper<Handle const> handle_;
@@ -285,7 +295,8 @@ public:
(key, sequence, object)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();
PreparedStatement insertTransaction = [this]() {
@@ -295,7 +306,8 @@ public:
(hash, ledger_sequence, date, transaction, metadata)
VALUES (?, ?, ?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "transactions")));
qualifiedTableName(settingsProvider_.get(), "transactions")
));
}();
PreparedStatement insertLedgerTransaction = [this]() {
@@ -305,7 +317,8 @@ public:
(ledger_sequence, hash)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")
));
}();
PreparedStatement insertSuccessor = [this]() {
@@ -315,7 +328,8 @@ public:
(key, seq, next)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "successor")));
qualifiedTableName(settingsProvider_.get(), "successor")
));
}();
PreparedStatement insertDiff = [this]() {
@@ -325,7 +339,8 @@ public:
(seq, key)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "diff")));
qualifiedTableName(settingsProvider_.get(), "diff")
));
}();
PreparedStatement insertAccountTx = [this]() {
@@ -335,7 +350,8 @@ public:
(account, seq_idx, hash)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")));
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();
PreparedStatement insertNFT = [this]() {
@@ -345,7 +361,8 @@ public:
(token_id, sequence, owner, is_burned)
VALUES (?, ?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));
}();
PreparedStatement insertIssuerNFT = [this]() {
@@ -355,7 +372,8 @@ public:
(issuer, taxon, token_id)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")));
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();
PreparedStatement insertNFTURI = [this]() {
@@ -365,7 +383,8 @@ public:
(token_id, sequence, uri)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));
}();
PreparedStatement insertNFTTx = [this]() {
@@ -375,7 +394,8 @@ public:
(token_id, seq_idx, hash)
VALUES (?, ?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();
PreparedStatement insertLedgerHeader = [this]() {
@@ -385,7 +405,8 @@ public:
(sequence, header)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "ledgers")));
qualifiedTableName(settingsProvider_.get(), "ledgers")
));
}();
PreparedStatement insertLedgerHash = [this]() {
@@ -395,7 +416,8 @@ public:
(hash, sequence)
VALUES (?, ?)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")
));
}();
//
@@ -410,7 +432,8 @@ public:
WHERE is_latest = ?
IF sequence IN (?, null)
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();
PreparedStatement deleteLedgerRange = [this]() {
@@ -420,7 +443,8 @@ public:
SET sequence = ?
WHERE is_latest = false
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();
//
@@ -437,7 +461,8 @@ public:
ORDER BY seq DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "successor")));
qualifiedTableName(settingsProvider_.get(), "successor")
));
}();
PreparedStatement selectDiff = [this]() {
@@ -447,7 +472,8 @@ public:
FROM {}
WHERE seq = ?
)",
qualifiedTableName(settingsProvider_.get(), "diff")));
qualifiedTableName(settingsProvider_.get(), "diff")
));
}();
PreparedStatement selectObject = [this]() {
@@ -460,7 +486,8 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();
PreparedStatement selectTransaction = [this]() {
@@ -470,7 +497,8 @@ public:
FROM {}
WHERE hash = ?
)",
qualifiedTableName(settingsProvider_.get(), "transactions")));
qualifiedTableName(settingsProvider_.get(), "transactions")
));
}();
PreparedStatement selectAllTransactionHashesInLedger = [this]() {
@@ -480,7 +508,8 @@ public:
FROM {}
WHERE ledger_sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")));
qualifiedTableName(settingsProvider_.get(), "ledger_transactions")
));
}();
PreparedStatement selectLedgerPageKeys = [this]() {
@@ -494,7 +523,8 @@ public:
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();
PreparedStatement selectLedgerPage = [this]() {
@@ -508,7 +538,8 @@ public:
LIMIT ?
ALLOW FILTERING
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();
PreparedStatement getToken = [this]() {
@@ -519,7 +550,8 @@ public:
WHERE key = ?
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "objects")));
qualifiedTableName(settingsProvider_.get(), "objects")
));
}();
PreparedStatement selectAccountTx = [this]() {
@@ -528,10 +560,11 @@ public:
SELECT hash, seq_idx
FROM {}
WHERE account = ?
AND seq_idx <= ?
AND seq_idx < ?
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")));
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();
PreparedStatement selectAccountTxForward = [this]() {
@@ -540,11 +573,12 @@ public:
SELECT hash, seq_idx
FROM {}
WHERE account = ?
AND seq_idx >= ?
AND seq_idx > ?
ORDER BY seq_idx ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "account_tx")));
qualifiedTableName(settingsProvider_.get(), "account_tx")
));
}();
PreparedStatement selectNFT = [this]() {
@@ -557,7 +591,8 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_tokens")));
qualifiedTableName(settingsProvider_.get(), "nf_tokens")
));
}();
PreparedStatement selectNFTURI = [this]() {
@@ -570,7 +605,8 @@ public:
ORDER BY sequence DESC
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")));
qualifiedTableName(settingsProvider_.get(), "nf_token_uris")
));
}();
PreparedStatement selectNFTTx = [this]() {
@@ -583,7 +619,8 @@ public:
ORDER BY seq_idx DESC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();
PreparedStatement selectNFTTxForward = [this]() {
@@ -596,7 +633,37 @@ public:
ORDER BY seq_idx ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")));
qualifiedTableName(settingsProvider_.get(), "nf_token_transactions")
));
}();
PreparedStatement selectNFTIDsByIssuer = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND (taxon, token_id) > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();
PreparedStatement selectNFTIDsByIssuerTaxon = [this]() {
return handle_.get().prepare(fmt::format(
R"(
SELECT token_id
FROM {}
WHERE issuer = ?
AND taxon = ?
AND token_id > ?
ORDER BY taxon ASC, token_id ASC
LIMIT ?
)",
qualifiedTableName(settingsProvider_.get(), "issuer_nf_tokens_v2")
));
}();
PreparedStatement selectLedgerByHash = [this]() {
@@ -607,7 +674,8 @@ public:
WHERE hash = ?
LIMIT 1
)",
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")));
qualifiedTableName(settingsProvider_.get(), "ledger_hashes")
));
}();
PreparedStatement selectLedgerBySeq = [this]() {
@@ -617,7 +685,8 @@ public:
FROM {}
WHERE sequence = ?
)",
qualifiedTableName(settingsProvider_.get(), "ledgers")));
qualifiedTableName(settingsProvider_.get(), "ledgers")
));
}();
PreparedStatement selectLatestLedger = [this]() {
@@ -627,7 +696,8 @@ public:
FROM {}
WHERE is_latest = true
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();
PreparedStatement selectLedgerRange = [this]() {
@@ -636,23 +706,24 @@ public:
SELECT sequence
FROM {}
)",
qualifiedTableName(settingsProvider_.get(), "ledger_range")));
qualifiedTableName(settingsProvider_.get(), "ledger_range")
));
}();
};
/**
* @brief Recreates the prepared statements
* @brief Recreates the prepared statements.
*/
void
prepareStatements(Handle const& handle)
{
log_.info() << "Preparing cassandra statements";
LOG(log_.info()) << "Preparing cassandra statements";
statements_ = std::make_unique<Statements>(settingsProvider_, handle);
log_.info() << "Finished preparing statements";
LOG(log_.info()) << "Finished preparing statements";
}
/**
* @brief Provides access to statements
* @brief Provides access to statements.
*/
std::unique_ptr<Statements> const&
operator->() const
@@ -664,4 +735,4 @@ private:
std::unique_ptr<Statements> statements_{nullptr};
};
} // namespace Backend::Cassandra
} // namespace data::cassandra
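A sketch of the intended call sequence for the Schema above; the constructor taking the settings provider and the use of selectLatestLedger (one of the prepared statements defined in Statements) are assumptions based on the surrounding code.

    data::cassandra::Schema<data::cassandra::SettingsProvider> schema{settingsProvider};

    // Prepare all statements once against an established handle ...
    schema.prepareStatements(handle);

    // ... then reach the prepared statements through operator->
    auto const res = handle.execute(schema->selectLatestLedger.bind());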


@@ -17,28 +17,40 @@
*/
//==============================================================================
#include <backend/cassandra/SettingsProvider.h>
#include <backend/cassandra/impl/Cluster.h>
#include <backend/cassandra/impl/Statement.h>
#include <config/Config.h>
#include "data/cassandra/SettingsProvider.hpp"
#include <boost/json.hpp>
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/Cluster.hpp"
#include "util/Constants.hpp"
#include "util/config/Config.hpp"
#include <boost/json/conversion.hpp>
#include <boost/json/value.hpp>
#include <cerrno>
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <filesystem>
#include <fstream>
#include <ios>
#include <iterator>
#include <optional>
#include <stdexcept>
#include <string>
#include <thread>
#include <system_error>
namespace Backend::Cassandra {
namespace data::cassandra {
namespace detail {
inline Settings::ContactPoints
tag_invoke(boost::json::value_to_tag<Settings::ContactPoints>, boost::json::value const& value)
{
if (not value.is_object())
throw std::runtime_error(
"Feed entire Cassandra section to parse "
"Settings::ContactPoints instead");
if (not value.is_object()) {
throw std::runtime_error("Feed entire Cassandra section to parse Settings::ContactPoints instead");
}
clio::Config obj{value};
util::Config const obj{value};
Settings::ContactPoints out;
out.contactPoints = obj.valueOrThrow<std::string>("contact_points", "`contact_points` must be a string");
@@ -56,7 +68,7 @@ tag_invoke(boost::json::value_to_tag<Settings::SecureConnectionBundle>, boost::j
}
} // namespace detail
SettingsProvider::SettingsProvider(clio::Config const& cfg, uint16_t ttl)
SettingsProvider::SettingsProvider(util::Config const& cfg, uint16_t ttl)
: config_{cfg}
, keyspace_{cfg.valueOr<std::string>("keyspace", "clio")}
, tablePrefix_{cfg.maybeValue<std::string>("table_prefix")}
@@ -75,18 +87,15 @@ SettingsProvider::getSettings() const
std::optional<std::string>
SettingsProvider::parseOptionalCertificate() const
{
if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath)
{
if (auto const certPath = config_.maybeValue<std::string>("certfile"); certPath) {
auto const path = std::filesystem::path(*certPath);
std::ifstream fileStream(path.string(), std::ios::in);
if (!fileStream)
{
if (!fileStream) {
throw std::system_error(errno, std::generic_category(), "Opening certificate " + path.string());
}
std::string contents(std::istreambuf_iterator<char>{fileStream}, std::istreambuf_iterator<char>{});
if (fileStream.bad())
{
if (fileStream.bad()) {
throw std::system_error(errno, std::generic_category(), "Reading certificate " + path.string());
}
@@ -100,12 +109,9 @@ Settings
SettingsProvider::parseSettings() const
{
auto settings = Settings::defaultSettings();
if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle)
{
if (auto const bundle = config_.maybeValue<Settings::SecureConnectionBundle>("secure_connect_bundle"); bundle) {
settings.connectionInfo = *bundle;
}
else
{
} else {
settings.connectionInfo =
config_.valueOrThrow<Settings::ContactPoints>("Missing contact_points in Cassandra config");
}
@@ -115,6 +121,19 @@ SettingsProvider::parseSettings() const
config_.valueOr<uint32_t>("max_write_requests_outstanding", settings.maxWriteRequestsOutstanding);
settings.maxReadRequestsOutstanding =
config_.valueOr<uint32_t>("max_read_requests_outstanding", settings.maxReadRequestsOutstanding);
settings.coreConnectionsPerHost =
config_.valueOr<uint32_t>("core_connections_per_host", settings.coreConnectionsPerHost);
settings.queueSizeIO = config_.maybeValue<uint32_t>("queue_size_io");
settings.writeBatchSize = config_.valueOr<std::size_t>("write_batch_size", settings.writeBatchSize);
auto const connectTimeoutSecond = config_.maybeValue<uint32_t>("connect_timeout");
if (connectTimeoutSecond)
settings.connectionTimeout = std::chrono::milliseconds{*connectTimeoutSecond * util::MILLISECONDS_PER_SECOND};
auto const requestTimeoutSecond = config_.maybeValue<uint32_t>("request_timeout");
if (requestTimeoutSecond)
settings.requestTimeout = std::chrono::milliseconds{*requestTimeoutSecond * util::MILLISECONDS_PER_SECOND};
settings.certificate = parseOptionalCertificate();
settings.username = config_.maybeValue<std::string>("username");
settings.password = config_.maybeValue<std::string>("password");
@@ -122,4 +141,4 @@ SettingsProvider::parseSettings() const
return settings;
}
} // namespace Backend::Cassandra
} // namespace data::cassandra
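A hedged example of the configuration keys parseSettings() reads, driven through util::Config; the literal values (and the presence of every key) are illustrative, not mandated by this change.

    #include <boost/json/parse.hpp>

    auto const raw = boost::json::parse(R"({
        "contact_points": "127.0.0.1",
        "keyspace": "clio",
        "max_write_requests_outstanding": 10000,
        "max_read_requests_outstanding": 100000,
        "core_connections_per_host": 1,
        "write_batch_size": 20,
        "connect_timeout": 10,
        "request_timeout": 20
    })");

    util::Config const cfg{raw};
    data::cassandra::SettingsProvider const provider{cfg, 0};
    auto const settings = provider.getSettings();  // the timeouts above end up as 10'000ms and 20'000ms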


@@ -19,20 +19,22 @@
#pragma once
#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <config/Config.h>
#include <log/Logger.h>
#include <util/Expected.h>
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"
namespace Backend::Cassandra {
#include <cstdint>
#include <optional>
#include <string>
namespace data::cassandra {
/**
* @brief Provides settings for @ref CassandraBackend
* @brief Provides settings for @ref BasicCassandraBackend.
*/
class SettingsProvider
{
clio::Config config_;
class SettingsProvider {
util::Config config_;
std::string keyspace_;
std::optional<std::string> tablePrefix_;
@@ -41,34 +43,50 @@ class SettingsProvider
Settings settings_;
public:
explicit SettingsProvider(clio::Config const& cfg, uint16_t ttl = 0);
/**
* @brief Create a settings provider from the specified config.
*
* @param cfg The config of Clio to use
* @param ttl Time to live setting
*/
explicit SettingsProvider(util::Config const& cfg, uint16_t ttl = 0);
/*! Get the cluster settings */
/**
* @return The cluster settings
*/
[[nodiscard]] Settings
getSettings() const;
/*! Get the specified keyspace */
/**
* @return The specified keyspace
*/
[[nodiscard]] inline std::string
getKeyspace() const
{
return keyspace_;
}
/*! Get an optional table prefix to use in all queries */
/**
* @return The optional table prefix to use in all queries
*/
[[nodiscard]] inline std::optional<std::string>
getTablePrefix() const
{
return tablePrefix_;
}
/*! Get the replication factor */
/**
* @return The replication factor
*/
[[nodiscard]] inline uint16_t
getReplicationFactor() const
{
return replicationFactor_;
}
/*! Get the default time to live to use in all `create` queries */
/**
* @return The default time to live to use in all `create` queries
*/
[[nodiscard]] inline uint16_t
getTtl() const
{
@@ -83,4 +101,4 @@ private:
parseSettings() const;
};
} // namespace Backend::Cassandra
} // namespace data::cassandra


@@ -19,11 +19,11 @@
#pragma once
#include <util/Expected.h>
#include "util/Expected.hpp"
#include <string>
#include <cstdint>
namespace Backend::Cassandra {
namespace data::cassandra {
namespace detail {
struct Settings;
@@ -52,8 +52,7 @@ using Batch = detail::Batch;
* because clio uses bigint (int64) everywhere except for when one needs
* to specify LIMIT, which needs an int32 :-/
*/
struct Limit
{
struct Limit {
int32_t limit;
};
@@ -64,4 +63,4 @@ using MaybeError = util::Expected<void, CassandraError>;
using ResultOrError = util::Expected<Result, CassandraError>;
using Error = util::Unexpected<CassandraError>;
} // namespace Backend::Cassandra
} // namespace data::cassandra
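A tiny sketch of the Limit wrapper in use; the statement name and the other bound values are assumptions. The point is only that the value destined for a CQL LIMIT ? placeholder binds as an int32 while everything else stays int64.

    // account and seqIdx bind as bigint; the trailing LIMIT ? placeholder gets an int32 via Limit
    auto statement = schema->selectAccountTx.bind(account, seqIdx, data::cassandra::Limit{200});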


@@ -19,19 +19,22 @@
#pragma once
#include <backend/cassandra/Concepts.h>
#include <backend/cassandra/Handle.h>
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/RetryPolicy.h>
#include <log/Logger.h>
#include <util/Expected.h>
#include "data/cassandra/Concepts.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/RetryPolicy.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio.hpp>
#include <boost/asio/io_context.hpp>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <utility>
namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {
/**
* @brief A query executor with a changeable retry policy
@@ -48,16 +51,17 @@ template <
typename StatementType,
typename HandleType = Handle,
SomeRetryPolicy RetryPolicyType = ExponentialBackoffRetryPolicy>
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>>
{
class AsyncExecutor : public std::enable_shared_from_this<AsyncExecutor<StatementType, HandleType, RetryPolicyType>> {
using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
using CallbackType = std::function<void(typename HandleType::ResultOrErrorType)>;
using RetryCallbackType = std::function<void()>;
clio::Logger log_{"Backend"};
util::Logger log_{"Backend"};
StatementType data_;
RetryPolicyType retryPolicy_;
CallbackType onComplete_;
RetryCallbackType onRetry_;
// does not exist during initial construction, hence optional
std::optional<FutureWithCallbackType> future_;
@@ -68,24 +72,37 @@ public:
* @brief Create a new instance of the AsyncExecutor and execute it.
*/
static void
run(boost::asio::io_context& ioc, HandleType const& handle, StatementType&& data, CallbackType&& onComplete)
run(boost::asio::io_context& ioc,
HandleType const& handle,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry)
{
// this is a helper that allows us to use std::make_shared below
struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType>
{
EnableMakeShared(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
: AsyncExecutor(ioc, std::move(data), std::move(onComplete))
struct EnableMakeShared : public AsyncExecutor<StatementType, HandleType, RetryPolicyType> {
EnableMakeShared(
boost::asio::io_context& ioc,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry
)
: AsyncExecutor(ioc, std::move(data), std::move(onComplete), std::move(onRetry))
{
}
};
auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete));
auto ptr = std::make_shared<EnableMakeShared>(ioc, std::move(data), std::move(onComplete), std::move(onRetry));
ptr->execute(handle);
}
private:
AsyncExecutor(boost::asio::io_context& ioc, StatementType&& data, CallbackType&& onComplete)
: data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}
AsyncExecutor(
boost::asio::io_context& ioc,
StatementType&& data,
CallbackType&& onComplete,
RetryCallbackType&& onRetry
)
: data_{std::move(data)}, retryPolicy_{ioc}, onComplete_{std::move(onComplete)}, onRetry_{std::move(onRetry)}
{
}
@@ -96,24 +113,23 @@ private:
// lifetime is extended by capturing self ptr
auto handler = [this, &handle, self](auto&& res) mutable {
if (res)
{
onComplete_(std::move(res));
}
else
{
if (retryPolicy_.shouldRetry(res.error()))
if (res) {
onComplete_(std::forward<decltype(res)>(res));
} else {
if (retryPolicy_.shouldRetry(res.error())) {
onRetry_();
retryPolicy_.retry([self, &handle]() { self->execute(handle); });
else
onComplete_(std::move(res)); // report error
} else {
onComplete_(std::forward<decltype(res)>(res)); // report error
}
}
self = nullptr; // explicitly decrement refcount
};
std::scoped_lock lck{mtx_};
std::scoped_lock const lck{mtx_};
future_.emplace(handle.asyncExecute(data_, std::move(handler)));
}
};
} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
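A hedged sketch of invoking the executor shown above with the new onRetry hook; ioc, handle and statement are assumed to exist in the surrounding code, and data::cassandra::Statement stands in for whatever StatementType is used.

    // The executor keeps itself alive via shared_from_this until onComplete is finally invoked
    data::cassandra::detail::AsyncExecutor<data::cassandra::Statement>::run(
        ioc,
        handle,
        std::move(statement),
        [](auto resultOrError) { /* final result, or the error once retries are exhausted */ },
        []() { /* invoked each time the retry policy schedules another attempt */ }
    );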


@@ -17,40 +17,44 @@
*/
//==============================================================================
#include <backend/cassandra/Error.h>
#include <backend/cassandra/impl/Batch.h>
#include <backend/cassandra/impl/Statement.h>
#include <util/Expected.h>
#include "data/cassandra/impl/Batch.hpp"
#include <exception>
#include "data/cassandra/Error.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/Statement.hpp"
#include "util/Expected.hpp"
#include <cassandra.h>
#include <stdexcept>
#include <vector>
namespace {
static constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
};
constexpr auto batchDeleter = [](CassBatch* ptr) { cass_batch_free(ptr); };
} // namespace
namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {
// todo: use an appropritae value instead of CASS_BATCH_TYPE_LOGGED for
// different use cases
// TODO: Use an appropriate value instead of CASS_BATCH_TYPE_LOGGED for different use cases
Batch::Batch(std::vector<Statement> const& statements)
: ManagedObject{cass_batch_new(CASS_BATCH_TYPE_LOGGED), batchDeleter}
{
cass_batch_set_is_idempotent(*this, cass_true);
for (auto const& statement : statements)
for (auto const& statement : statements) {
if (auto const res = add(statement); not res)
throw std::runtime_error("Failed to add statement to batch: " + res.error());
}
}
MaybeError
Batch::add(Statement const& statement)
{
if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK)
{
if (auto const rc = cass_batch_add_statement(*this, statement); rc != CASS_OK) {
return Error{CassandraError{cass_error_desc(rc), rc}};
}
return {};
}
} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
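A short usage sketch; the vector of statements and the extra statement are assumed to be built elsewhere. Construction throws on failure, while add() reports failure through MaybeError.

    data::cassandra::detail::Batch batch{statements};  // throws std::runtime_error if any statement cannot be added

    if (auto const res = batch.add(extraStatement); not res) {
        // res.error() carries the driver's error description
    }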


@@ -19,19 +19,20 @@
#pragma once
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include <cassandra.h>
namespace Backend::Cassandra::detail {
#include <vector>
struct Batch : public ManagedObject<CassBatch>
{
namespace data::cassandra::detail {
struct Batch : public ManagedObject<CassBatch> {
Batch(std::vector<Statement> const& statements);
MaybeError
add(Statement const& statement);
};
} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail


@@ -17,20 +17,24 @@
*/
//==============================================================================
#include <backend/cassandra/impl/Cluster.h>
#include <backend/cassandra/impl/SslContext.h>
#include <backend/cassandra/impl/Statement.h>
#include <util/Expected.h>
#include "data/cassandra/impl/Cluster.hpp"
#include <exception>
#include <vector>
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/SslContext.hpp"
#include "util/log/Logger.hpp"
#include <cassandra.h>
#include <fmt/core.h>
#include <stdexcept>
#include <string>
#include <variant>
namespace {
static constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };
constexpr auto clusterDeleter = [](CassCluster* ptr) { cass_cluster_free(ptr); };
template <class... Ts>
struct overloadSet : Ts...
{
struct overloadSet : Ts... {
using Ts::operator()...;
};
@@ -39,53 +43,47 @@ template <class... Ts>
overloadSet(Ts...) -> overloadSet<Ts...>;
}; // namespace
namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {
Cluster::Cluster(Settings const& settings) : ManagedObject{cass_cluster_new(), clusterDeleter}
{
using std::to_string;
cass_cluster_set_token_aware_routing(*this, cass_true);
if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK)
{
throw std::runtime_error(std::string{"Error setting cassandra protocol version to v4: "} + cass_error_desc(rc));
if (auto const rc = cass_cluster_set_protocol_version(*this, CASS_PROTOCOL_VERSION_V4); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Error setting cassandra protocol version to v4: {}", cass_error_desc(rc))
);
}
if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK)
{
if (auto const rc = cass_cluster_set_num_threads_io(*this, settings.threads); rc != CASS_OK) {
throw std::runtime_error(
std::string{"Error setting cassandra io threads to "} + to_string(settings.threads) + ": " +
cass_error_desc(rc));
fmt::format("Error setting cassandra io threads to {}: {}", settings.threads, cass_error_desc(rc))
);
}
cass_log_set_level(settings.enableLog ? CASS_LOG_TRACE : CASS_LOG_DISABLED);
cass_cluster_set_connect_timeout(*this, settings.connectionTimeout.count());
cass_cluster_set_request_timeout(*this, settings.requestTimeout.count());
// TODO: other options to experiment with and consider later:
// cass_cluster_set_max_concurrent_requests_threshold(*this, 10000);
// cass_cluster_set_queue_size_event(*this, 100000);
// cass_cluster_set_queue_size_io(*this, 100000);
// cass_cluster_set_write_bytes_high_water_mark(*this, 16 * 1024 * 1024); // 16mb
// cass_cluster_set_write_bytes_low_water_mark(*this, 8 * 1024 * 1024); // half of allowance
// cass_cluster_set_pending_requests_high_water_mark(*this, 5000);
// cass_cluster_set_pending_requests_low_water_mark(*this, 2500); // half
// cass_cluster_set_max_requests_per_flush(*this, 1000);
// cass_cluster_set_max_concurrent_creation(*this, 8);
// cass_cluster_set_max_connections_per_host(*this, 6);
// cass_cluster_set_core_connections_per_host(*this, 4);
// cass_cluster_set_constant_speculative_execution_policy(*this, 1000, 1024);
if (auto const rc = cass_cluster_set_core_connections_per_host(*this, settings.coreConnectionsPerHost);
rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set core connections per host: {}", cass_error_desc(rc)));
}
if (auto const rc = cass_cluster_set_queue_size_io(
*this, settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding);
rc != CASS_OK)
{
throw std::runtime_error(std::string{"Could not set queue size for IO per host: "} + cass_error_desc(rc));
auto const queueSize =
settings.queueSizeIO.value_or(settings.maxWriteRequestsOutstanding + settings.maxReadRequestsOutstanding);
if (auto const rc = cass_cluster_set_queue_size_io(*this, queueSize); rc != CASS_OK) {
throw std::runtime_error(fmt::format("Could not set queue size for IO per host: {}", cass_error_desc(rc)));
}
setupConnection(settings);
setupCertificate(settings);
setupCredentials(settings);
LOG(log_.info()) << "Threads: " << settings.threads;
LOG(log_.info()) << "Core connections per host: " << settings.coreConnectionsPerHost;
LOG(log_.info()) << "IO queue size: " << queueSize;
LOG(log_.info()) << "Batched writes auto-chunk size: " << settings.writeBatchSize;
}
void
@@ -94,27 +92,31 @@ Cluster::setupConnection(Settings const& settings)
std::visit(
overloadSet{
[this](Settings::ContactPoints const& points) { setupContactPoints(points); },
[this](Settings::SecureConnectionBundle const& bundle) { setupSecureBundle(bundle); }},
settings.connectionInfo);
[this](Settings::SecureConnectionBundle const& bundle) { setupSecureBundle(bundle); }
},
settings.connectionInfo
);
}
void
Cluster::setupContactPoints(Settings::ContactPoints const& points)
{
using std::to_string;
auto throwErrorIfNeeded = [](CassError rc, std::string const label, std::string const value) {
if (rc != CASS_OK)
throw std::runtime_error("Cassandra: Error setting " + label + " [" + value + "]: " + cass_error_desc(rc));
auto throwErrorIfNeeded = [](CassError rc, std::string const& label, std::string const& value) {
if (rc != CASS_OK) {
throw std::runtime_error(
fmt::format("Cassandra: Error setting {} [{}]: {}", label, value, cass_error_desc(rc))
);
}
};
{
log_.debug() << "Attempt connection using contact points: " << points.contactPoints;
LOG(log_.debug()) << "Attempt connection using contact points: " << points.contactPoints;
auto const rc = cass_cluster_set_contact_points(*this, points.contactPoints.data());
throwErrorIfNeeded(rc, "contact_points", points.contactPoints);
}
if (points.port)
{
if (points.port) {
auto const rc = cass_cluster_set_port(*this, points.port.value());
throwErrorIfNeeded(rc, "port", to_string(points.port.value()));
}
@@ -123,10 +125,9 @@ Cluster::setupContactPoints(Settings::ContactPoints const& points)
void
Cluster::setupSecureBundle(Settings::SecureConnectionBundle const& bundle)
{
log_.debug() << "Attempt connection using secure bundle";
if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK)
{
throw std::runtime_error("Failed to connect using secure connection bundle" + bundle.bundle);
LOG(log_.debug()) << "Attempt connection using secure bundle";
if (auto const rc = cass_cluster_set_cloud_secure_connection_bundle(*this, bundle.bundle.data()); rc != CASS_OK) {
throw std::runtime_error("Failed to connect using secure connection bundle " + bundle.bundle);
}
}
@@ -136,8 +137,8 @@ Cluster::setupCertificate(Settings const& settings)
if (not settings.certificate)
return;
log_.debug() << "Configure SSL context";
SslContext context = SslContext(*settings.certificate);
LOG(log_.debug()) << "Configure SSL context";
SslContext const context = SslContext(*settings.certificate);
cass_cluster_set_ssl(*this, context);
}
@@ -147,8 +148,8 @@ Cluster::setupCredentials(Settings const& settings)
if (not settings.username || not settings.password)
return;
log_.debug() << "Set credentials; username: " << settings.username.value();
LOG(log_.debug()) << "Set credentials; username: " << settings.username.value();
cass_cluster_set_credentials(*this, settings.username.value().c_str(), settings.password.value().c_str());
}
} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail


@@ -0,0 +1,145 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/cassandra/impl/ManagedObject.hpp"
#include "util/log/Logger.hpp"
#include <cassandra.h>
#include <chrono>
#include <optional>
#include <string>
#include <string_view>
#include <thread>
#include <variant>
namespace data::cassandra::detail {
// TODO: move Settings to public interface, not detail
/**
* @brief Bundles all cassandra settings in one place.
*/
struct Settings {
static constexpr std::size_t DEFAULT_CONNECTION_TIMEOUT = 10000;
static constexpr uint32_t DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING = 10'000;
static constexpr uint32_t DEFAULT_MAX_READ_REQUESTS_OUTSTANDING = 100'000;
static constexpr std::size_t DEFAULT_BATCH_SIZE = 20;
/**
* @brief Represents the configuration of contact points for cassandra.
*/
struct ContactPoints {
std::string contactPoints = "127.0.0.1"; // defaults to localhost
std::optional<uint16_t> port = {};
};
/**
* @brief Represents the configuration of a secure connection bundle.
*/
struct SecureConnectionBundle {
std::string bundle; // no meaningful default
};
/** @brief Enables or disables cassandra driver logger */
bool enableLog = false;
/** @brief Connect timeout specified in milliseconds */
std::chrono::milliseconds connectionTimeout = std::chrono::milliseconds{DEFAULT_CONNECTION_TIMEOUT};
/** @brief Request timeout specified in milliseconds */
std::chrono::milliseconds requestTimeout = std::chrono::milliseconds{0}; // no timeout at all
/** @brief Connection information; either ContactPoints or SecureConnectionBundle */
std::variant<ContactPoints, SecureConnectionBundle> connectionInfo = ContactPoints{};
/** @brief The number of threads for the driver to pool */
uint32_t threads = std::thread::hardware_concurrency();
/** @brief The maximum number of outstanding write requests at any given moment */
uint32_t maxWriteRequestsOutstanding = DEFAULT_MAX_WRITE_REQUESTS_OUTSTANDING;
/** @brief The maximum number of outstanding read requests at any given moment */
uint32_t maxReadRequestsOutstanding = DEFAULT_MAX_READ_REQUESTS_OUTSTANDING;
/** @brief The number of connections per host to always have active */
uint32_t coreConnectionsPerHost = 1u;
/** @brief Size of batches when writing */
std::size_t writeBatchSize = DEFAULT_BATCH_SIZE;
/** @brief Size of the IO queue */
std::optional<uint32_t> queueSizeIO{};
/** @brief SSL certificate */
std::optional<std::string> certificate{}; // ssl context
/** @brief Username/login */
std::optional<std::string> username{};
/** @brief Password to match the `username` */
std::optional<std::string> password{};
/**
* @brief Creates a new Settings object as a copy of the current one with overridden contact points.
*/
Settings
withContactPoints(std::string_view contactPoints)
{
auto tmp = *this;
tmp.connectionInfo = ContactPoints{std::string{contactPoints}};
return tmp;
}
/**
* @brief Returns the default settings.
*/
static Settings
defaultSettings()
{
return Settings();
}
};
class Cluster : public ManagedObject<CassCluster> {
util::Logger log_{"Backend"};
public:
Cluster(Settings const& settings);
private:
void
setupConnection(Settings const& settings);
void
setupContactPoints(Settings::ContactPoints const& points);
void
setupSecureBundle(Settings::SecureConnectionBundle const& bundle);
void
setupCertificate(Settings const& settings);
void
setupCredentials(Settings const& settings);
};
} // namespace data::cassandra::detail
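A minimal sketch of adjusting the defaults above before handing them to Cluster; the overridden values are arbitrary examples.

    auto settings = data::cassandra::detail::Settings::defaultSettings().withContactPoints("10.0.0.5");
    settings.coreConnectionsPerHost = 2;
    settings.writeBatchSize = 50;
    settings.requestTimeout = std::chrono::milliseconds{20'000};

    data::cassandra::detail::Cluster const cluster{settings};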


@@ -0,0 +1,90 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/cassandra/impl/ManagedObject.hpp"
#include <cassandra.h>
#include <ripple/basics/base_uint.h>
#include <cstdint>
#include <stdexcept>
#include <string>
#include <string_view>
#include <vector>
namespace data::cassandra::detail {
class Collection : public ManagedObject<CassCollection> {
static constexpr auto deleter = [](CassCollection* ptr) { cass_collection_free(ptr); };
static void
throwErrorIfNeeded(CassError const rc, std::string_view const label)
{
if (rc == CASS_OK)
return;
auto const tag = '[' + std::string{label} + ']';
throw std::logic_error(tag + ": " + cass_error_desc(rc));
}
public:
/* implicit */ Collection(CassCollection* ptr);
template <typename Type>
explicit Collection(std::vector<Type> const& value)
: ManagedObject{cass_collection_new(CASS_COLLECTION_TYPE_LIST, value.size()), deleter}
{
bind(value);
}
template <typename Type>
void
bind(std::vector<Type> const& values) const
{
for (auto const& value : values)
append(value);
}
void
append(bool const value) const
{
auto const rc = cass_collection_append_bool(*this, value ? cass_true : cass_false);
throwErrorIfNeeded(rc, "Bind bool");
}
void
append(int64_t const value) const
{
auto const rc = cass_collection_append_int64(*this, value);
throwErrorIfNeeded(rc, "Bind int64");
}
void
append(ripple::uint256 const& value) const
{
auto const rc = cass_collection_append_bytes(
*this,
static_cast<cass_byte_t const*>(static_cast<unsigned char const*>(value.data())),
ripple::uint256::size()
);
throwErrorIfNeeded(rc, "Bind ripple::uint256");
}
};
} // namespace data::cassandra::detail
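A small sketch of building a Collection from a vector using the int64 overload above; binding the resulting list to a statement parameter is the assumed follow-up step.

    std::vector<int64_t> const sequences{100, 101, 102};
    data::cassandra::detail::Collection const collection{sequences};  // appends each element via append(int64_t)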


@@ -0,0 +1,538 @@
//------------------------------------------------------------------------------
/*
This file is part of clio: https://github.com/XRPLF/clio
Copyright (c) 2023, the clio developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#pragma once
#include "data/BackendCounters.hpp"
#include "data/BackendInterface.hpp"
#include "data/cassandra/Handle.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/AsyncExecutor.hpp"
#include "util/Assert.hpp"
#include "util/Batching.hpp"
#include "util/log/Logger.hpp"
#include <boost/asio.hpp>
#include <boost/asio/associated_executor.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json/object.hpp>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include <optional>
#include <stdexcept>
#include <thread>
#include <type_traits>
#include <vector>
namespace data::cassandra::detail {
// TODO: this could probably be also moved out of detail and into the main cassandra namespace.
/**
* @brief Implements async and sync querying against the cassandra DB with support for throttling.
*
* Note: A lot of the code that uses yield is repeated below.
* This is acceptable for now because we hope to get rid of it entirely later on.
*/
template <typename HandleType = Handle, SomeBackendCounters BackendCountersType = BackendCounters>
class DefaultExecutionStrategy {
util::Logger log_{"Backend"};
std::uint32_t maxWriteRequestsOutstanding_;
std::atomic_uint32_t numWriteRequestsOutstanding_ = 0;
std::uint32_t maxReadRequestsOutstanding_;
std::atomic_uint32_t numReadRequestsOutstanding_ = 0;
std::size_t writeBatchSize_;
std::mutex throttleMutex_;
std::condition_variable throttleCv_;
std::mutex syncMutex_;
std::condition_variable syncCv_;
boost::asio::io_context ioc_;
std::optional<boost::asio::io_service::work> work_;
std::reference_wrapper<HandleType const> handle_;
std::thread thread_;
typename BackendCountersType::PtrType counters_;
public:
using ResultOrErrorType = typename HandleType::ResultOrErrorType;
using StatementType = typename HandleType::StatementType;
using PreparedStatementType = typename HandleType::PreparedStatementType;
using FutureType = typename HandleType::FutureType;
using FutureWithCallbackType = typename HandleType::FutureWithCallbackType;
using ResultType = typename HandleType::ResultType;
using CompletionTokenType = boost::asio::yield_context;
/**
* @param settings The settings to use
* @param handle A handle to the cassandra database
*/
DefaultExecutionStrategy(
Settings const& settings,
HandleType const& handle,
typename BackendCountersType::PtrType counters = BackendCountersType::make()
)
: maxWriteRequestsOutstanding_{settings.maxWriteRequestsOutstanding}
, maxReadRequestsOutstanding_{settings.maxReadRequestsOutstanding}
, writeBatchSize_{settings.writeBatchSize}
, work_{ioc_}
, handle_{std::cref(handle)}
, thread_{[this]() { ioc_.run(); }}
, counters_{std::move(counters)}
{
LOG(log_.info()) << "Max write requests outstanding is " << maxWriteRequestsOutstanding_
<< "; Max read requests outstanding is " << maxReadRequestsOutstanding_;
}
~DefaultExecutionStrategy()
{
work_.reset();
ioc_.stop();
thread_.join();
}
/**
* @brief Wait for all async writes to finish before unblocking.
*/
void
sync()
{
LOG(log_.debug()) << "Waiting to sync all writes...";
std::unique_lock<std::mutex> lck(syncMutex_);
syncCv_.wait(lck, [this]() { return finishedAllWriteRequests(); });
LOG(log_.debug()) << "Sync done.";
}
/**
* @return true if outstanding read requests allowance is exhausted; false otherwise
*/
bool
isTooBusy() const
{
bool const result = numReadRequestsOutstanding_ >= maxReadRequestsOutstanding_;
if (result)
counters_->registerTooBusy();
return result;
}
/**
* @brief Blocking query execution used for writing data.
*
* Retries forever sleeping for 5 milliseconds between attempts.
*/
ResultOrErrorType
writeSync(StatementType const& statement)
{
auto const startTime = std::chrono::steady_clock::now();
while (true) {
auto res = handle_.get().execute(statement);
if (res) {
counters_->registerWriteSync(startTime);
return res;
}
counters_->registerWriteSyncRetry();
LOG(log_.warn()) << "Cassandra sync write error, retrying: " << res.error();
std::this_thread::sleep_for(std::chrono::milliseconds(5));
}
}
/**
* @brief Blocking query execution used for writing data.
*
* Retries forever sleeping for 5 milliseconds between attempts.
*/
template <typename... Args>
ResultOrErrorType
writeSync(PreparedStatementType const& preparedStatement, Args&&... args)
{
return writeSync(preparedStatement.bind(std::forward<Args>(args)...));
}
/**
* @brief Non-blocking query execution used for writing data.
*
* Retries forever with retry policy specified by @ref AsyncExecutor
*
* @param preparedStatement Statement to prepare and execute
* @param args Args to bind to the prepared statement
* @throw DatabaseTimeout on timeout
*/
template <typename... Args>
void
write(PreparedStatementType const& preparedStatement, Args&&... args)
{
auto const startTime = std::chrono::steady_clock::now();
auto statement = preparedStatement.bind(std::forward<Args>(args)...);
incrementOutstandingRequestCount();
counters_->registerWriteStarted();
// Note: lifetime is controlled by std::shared_from_this internally
AsyncExecutor<std::decay_t<decltype(statement)>, HandleType>::run(
ioc_,
handle_,
std::move(statement),
[this, startTime](auto const&) {
decrementOutstandingRequestCount();
counters_->registerWriteFinished(startTime);
},
[this]() { counters_->registerWriteRetry(); }
);
}
/**
* @brief Non-blocking batched query execution used for writing data.
*
* Retries forever with retry policy specified by @ref AsyncExecutor.
*
* @param statements Vector of statements to execute as a batch
* @throw DatabaseTimeout on timeout
*/
void
write(std::vector<StatementType>&& statements)
{
if (statements.empty())
return;
util::forEachBatch(std::move(statements), writeBatchSize_, [this](auto begin, auto end) {
auto const startTime = std::chrono::steady_clock::now();
auto chunk = std::vector<StatementType>{};
chunk.reserve(std::distance(begin, end));
std::move(begin, end, std::back_inserter(chunk));
incrementOutstandingRequestCount();
counters_->registerWriteStarted();
// Note: lifetime is controlled by std::shared_from_this internally
AsyncExecutor<std::decay_t<decltype(chunk)>, HandleType>::run(
ioc_,
handle_,
std::move(chunk),
[this, startTime](auto const&) {
decrementOutstandingRequestCount();
counters_->registerWriteFinished(startTime);
},
[this]() { counters_->registerWriteRetry(); }
);
});
}
/**
* @brief Coroutine-based query execution used for reading data.
*
* Retries forever until successful or throws an exception on timeout.
*
* @param token Completion token (yield_context)
* @param preparedStatement Statement to prepare and execute
* @param args Args to bind to the prepared statement
* @throw DatabaseTimeout on timeout
* @return ResultType or error wrapped in Expected
*/
template <typename... Args>
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, PreparedStatementType const& preparedStatement, Args&&... args)
{
return read(token, preparedStatement.bind(std::forward<Args>(args)...));
}
/**
* @brief Coroutine-based query execution used for reading data.
*
* Retries forever until successful or throws an exception on timeout.
*
* @param token Completion token (yield_context)
* @param statements Statements to execute in a batch
* @throw DatabaseTimeout on timeout
* @return ResultType or error wrapped in Expected
*/
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, std::vector<StatementType> const& statements)
{
auto const startTime = std::chrono::steady_clock::now();
auto const numStatements = statements.size();
std::optional<FutureWithCallbackType> future;
counters_->registerReadStarted(numStatements);
// todo: perhaps use policy instead
while (true) {
numReadRequestsOutstanding_ += numStatements;
auto init = [this, &statements, &future]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));
future.emplace(handle_.get().asyncExecute(statements, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); }
);
}));
};
auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
init, token, boost::asio::get_associated_executor(token)
);
numReadRequestsOutstanding_ -= numStatements;
if (res) {
counters_->registerReadFinished(startTime, numStatements);
return res;
}
LOG(log_.error()) << "Failed batch read in coroutine: " << res.error();
try {
throwErrorIfNeeded(res.error());
} catch (...) {
counters_->registerReadError(numStatements);
throw;
}
counters_->registerReadRetry(numStatements);
}
}
/**
* @brief Coroutine-based query execution used for reading data.
*
* Retries forever until successful or throws an exception on timeout.
*
* @param token Completion token (yield_context)
* @param statement Statement to execute
* @throw DatabaseTimeout on timeout
* @return ResultType or error wrapped in Expected
*/
[[maybe_unused]] ResultOrErrorType
read(CompletionTokenType token, StatementType const& statement)
{
auto const startTime = std::chrono::steady_clock::now();
std::optional<FutureWithCallbackType> future;
counters_->registerReadStarted();
// todo: perhaps use policy instead
while (true) {
++numReadRequestsOutstanding_;
auto init = [this, &statement, &future]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));
future.emplace(handle_.get().asyncExecute(statement, [sself](auto&& res) mutable {
boost::asio::post(
boost::asio::get_associated_executor(*sself),
[sself, res = std::forward<decltype(res)>(res)]() mutable { sself->complete(std::move(res)); }
);
}));
};
auto res = boost::asio::async_compose<CompletionTokenType, void(ResultOrErrorType)>(
init, token, boost::asio::get_associated_executor(token)
);
--numReadRequestsOutstanding_;
if (res) {
counters_->registerReadFinished(startTime);
return res;
}
LOG(log_.error()) << "Failed read in coroutine: " << res.error();
try {
throwErrorIfNeeded(res.error());
} catch (...) {
counters_->registerReadError();
throw;
}
counters_->registerReadRetry();
}
}
/**
* @brief Coroutine-based query execution used for reading data.
*
* Attempts to execute each statement. On any error the whole vector will be
* discarded and an exception will be thrown.
*
* @param token Completion token (yield_context)
* @param statements Statements to execute
* @throw DatabaseTimeout on db error
* @return Vector of results
*/
std::vector<ResultType>
readEach(CompletionTokenType token, std::vector<StatementType> const& statements)
{
auto const startTime = std::chrono::steady_clock::now();
std::atomic_uint64_t errorsCount = 0u;
std::atomic_int numOutstanding = statements.size();
numReadRequestsOutstanding_ += statements.size();
auto futures = std::vector<FutureWithCallbackType>{};
futures.reserve(numOutstanding);
counters_->registerReadStarted(statements.size());
auto init = [this, &statements, &futures, &errorsCount, &numOutstanding]<typename Self>(Self& self) {
auto sself = std::make_shared<Self>(std::move(self));
auto executionHandler = [&errorsCount, &numOutstanding, sself](auto const& res) mutable {
if (not res)
++errorsCount;
// when all async operations complete unblock the result
if (--numOutstanding == 0) {
boost::asio::post(boost::asio::get_associated_executor(*sself), [sself]() mutable {
sself->complete();
});
}
};
std::transform(
std::cbegin(statements),
std::cend(statements),
std::back_inserter(futures),
[this, &executionHandler](auto const& statement) {
return handle_.get().asyncExecute(statement, executionHandler);
}
);
};
boost::asio::async_compose<CompletionTokenType, void()>(
init, token, boost::asio::get_associated_executor(token)
);
numReadRequestsOutstanding_ -= statements.size();
if (errorsCount > 0) {
ASSERT(errorsCount <= statements.size(), "Errors number cannot exceed statements number");
counters_->registerReadError(errorsCount);
counters_->registerReadFinished(startTime, statements.size() - errorsCount);
throw DatabaseTimeout{};
}
counters_->registerReadFinished(startTime, statements.size());
std::vector<ResultType> results;
results.reserve(futures.size());
// it's safe to call blocking get on futures here as we already waited for the coroutine to resume above.
std::transform(
std::make_move_iterator(std::begin(futures)),
std::make_move_iterator(std::end(futures)),
std::back_inserter(results),
[](auto&& future) {
auto entry = future.get();
auto&& res = entry.value();
return std::move(res);
}
);
ASSERT(
futures.size() == statements.size(),
"Futures size must be equal to statements size. Got {} and {}",
futures.size(),
statements.size()
);
ASSERT(
results.size() == statements.size(),
"Results size must be equal to statements size. Got {} and {}",
results.size(),
statements.size()
);
return results;
}
/**
* @brief Get statistics about the backend.
*/
boost::json::object
stats() const
{
return counters_->report();
}
private:
void
incrementOutstandingRequestCount()
{
{
std::unique_lock<std::mutex> lck(throttleMutex_);
if (!canAddWriteRequest()) {
LOG(log_.trace()) << "Max outstanding requests reached. "
<< "Waiting for other requests to finish";
throttleCv_.wait(lck, [this]() { return canAddWriteRequest(); });
}
}
++numWriteRequestsOutstanding_;
}
void
decrementOutstandingRequestCount()
{
// sanity check
ASSERT(numWriteRequestsOutstanding_ > 0, "Decrementing num outstanding below 0");
size_t const cur = (--numWriteRequestsOutstanding_);
{
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard const lck(throttleMutex_);
throttleCv_.notify_one();
}
if (cur == 0) {
// mutex lock required to prevent race condition around spurious
// wakeup
std::lock_guard const lck(syncMutex_);
syncCv_.notify_one();
}
}
bool
canAddWriteRequest() const
{
return numWriteRequestsOutstanding_ < maxWriteRequestsOutstanding_;
}
bool
finishedAllWriteRequests() const
{
return numWriteRequestsOutstanding_ == 0;
}
void
throwErrorIfNeeded(CassandraError err) const
{
if (err.isTimeout())
throw DatabaseTimeout();
if (err.isInvalidQuery())
throw std::runtime_error("Invalid query");
}
};
} // namespace data::cassandra::detail
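Editor's note: the readEach method above fans out one asynchronous execute per statement, counts completions down with an atomic, and only unblocks the caller once the count reaches zero. The following standalone sketch mirrors that fan-out/fan-in shape with plain standard-library threads and a condition variable instead of Boost.Asio and the Cassandra driver; fakeAsyncQuery, readAll, and every other name below are hypothetical illustrations, not Clio API.

// Minimal sketch, assuming a made-up async driver call: issue every query,
// track how many are still outstanding, and wake the waiting caller once the
// counter reaches zero, mirroring readEach's fan-out/fan-in shape.
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <string>
#include <thread>
#include <utility>
#include <vector>

// Hypothetical stand-in for an asynchronous driver call; it invokes the
// callback from another thread with a fabricated result.
template <typename Callback>
void
fakeAsyncQuery(std::string statement, Callback cb)
{
    std::thread([statement = std::move(statement), cb = std::move(cb)]() mutable {
        cb(statement + ": ok");
    }).detach();
}

std::vector<std::string>
readAll(std::vector<std::string> const& statements)
{
    std::mutex mtx;
    std::condition_variable cv;
    std::size_t outstanding = statements.size();
    std::vector<std::string> results(statements.size());

    for (std::size_t i = 0; i < statements.size(); ++i) {
        fakeAsyncQuery(statements[i], [&, i](std::string res) {
            std::lock_guard const lk(mtx);
            results[i] = std::move(res);
            // the last completion wakes the caller blocked below
            if (--outstanding == 0)
                cv.notify_one();
        });
    }

    std::unique_lock lk(mtx);
    cv.wait(lk, [&] { return outstanding == 0; });
    return results;
}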

View File

@@ -17,18 +17,25 @@
*/
//==============================================================================
#include <backend/cassandra/Error.h>
#include <backend/cassandra/impl/Future.h>
#include <backend/cassandra/impl/Result.h>
#include "data/cassandra/impl/Future.hpp"
#include <exception>
#include <vector>
#include "data/cassandra/Error.hpp"
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include "data/cassandra/impl/Result.hpp"
#include <cassandra.h>
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
namespace {
static constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
constexpr auto futureDeleter = [](CassFuture* ptr) { cass_future_free(ptr); };
} // namespace
namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {
/* implicit */ Future::Future(CassFuture* ptr) : ManagedObject{ptr, futureDeleter}
{
@@ -37,11 +44,10 @@ namespace Backend::Cassandra::detail {
MaybeError
Future::await() const
{
if (auto const rc = cass_future_error_code(*this); rc)
{
auto errMsg = [this](std::string label) {
char const* message;
std::size_t len;
if (auto const rc = cass_future_error_code(*this); rc) {
auto errMsg = [this](std::string const& label) {
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(*this, &message, &len);
return label + ": " + std::string{message, len};
}(cass_error_desc(rc));
@@ -53,45 +59,42 @@ Future::await() const
ResultOrError
Future::get() const
{
if (auto const rc = cass_future_error_code(*this); rc)
{
auto const errMsg = [this](std::string label) {
char const* message;
std::size_t len;
if (auto const rc = cass_future_error_code(*this); rc) {
auto const errMsg = [this](std::string const& label) {
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(*this, &message, &len);
return label + ": " + std::string{message, len};
}("future::get()");
return Error{CassandraError{errMsg, rc}};
}
else
{
return Result{cass_future_get_result(*this)};
}
return Result{cass_future_get_result(*this)};
}
void
invokeHelper(CassFuture* ptr, void* cbPtr)
{
// Note: can't use Future{ptr}.get() because double free will occur :/
auto* cb = static_cast<FutureWithCallback::fn_t*>(cbPtr);
if (auto const rc = cass_future_error_code(ptr); rc)
{
auto const errMsg = [&ptr](std::string label) {
char const* message;
std::size_t len;
// Note2: we are moving/copying it locally as a workaround for an issue we are seeing from asio recently.
// stackoverflow.com/questions/77004137/boost-asio-async-compose-gets-stuck-under-load
auto* cb = static_cast<FutureWithCallback::FnType*>(cbPtr);
auto local = std::make_unique<FutureWithCallback::FnType>(std::move(*cb));
if (auto const rc = cass_future_error_code(ptr); rc) {
auto const errMsg = [&ptr](std::string const& label) {
char const* message = nullptr;
std::size_t len = 0;
cass_future_error_message(ptr, &message, &len);
return label + ": " + std::string{message, len};
}("invokeHelper");
(*cb)(Error{CassandraError{errMsg, rc}});
}
else
{
(*cb)(Result{cass_future_get_result(ptr)});
(*local)(Error{CassandraError{errMsg, rc}});
} else {
(*local)(Result{cass_future_get_result(ptr)});
}
}
/* implicit */ FutureWithCallback::FutureWithCallback(CassFuture* ptr, fn_t&& cb)
: Future{ptr}, cb_{std::make_unique<fn_t>(std::move(cb))}
/* implicit */ FutureWithCallback::FutureWithCallback(CassFuture* ptr, FnType&& cb)
: Future{ptr}, cb_{std::make_unique<FnType>(std::move(cb))}
{
// Instead of passing `this` as the userdata void*, we pass the address of
// the callback itself which will survive std::move of the
@@ -99,4 +102,4 @@ invokeHelper(CassFuture* ptr, void* cbPtr)
cass_future_set_callback(*this, &invokeHelper, cb_.get());
}
} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
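Editor's note on the workaround visible in invokeHelper above: the driver only accepts a plain function pointer plus a void*, so the C++ callable is kept behind a stable heap address and moved into a local before being invoked. The self-contained sketch below shows that bridging pattern under made-up names (fakeDriverSetCallback, CallbackHolder); it is an illustration of the technique only, not the Cassandra driver API.

// Minimal sketch of bridging a C-style callback (function pointer + void*)
// to a std::function that lives behind a stable heap address.
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <utility>

// Hypothetical C-style API: it can only store a function pointer and an
// opaque user-data pointer, similar in shape to cass_future_set_callback.
using CCallback = void (*)(char const* payload, void* userData);

void
fakeDriverSetCallback(CCallback cb, void* userData)
{
    // for the sketch the callback fires immediately; a real driver would
    // invoke it later from one of its own threads
    cb("result-row", userData);
}

class CallbackHolder {
public:
    using FnType = std::function<void(std::string)>;

    explicit CallbackHolder(FnType&& fn) : cb_{std::make_unique<FnType>(std::move(fn))}
    {
        // pass the address of the heap-allocated std::function, not `this`:
        // the unique_ptr target keeps its address even if CallbackHolder is moved
        fakeDriverSetCallback(&CallbackHolder::trampoline, cb_.get());
    }

    CallbackHolder(CallbackHolder&&) = default;

private:
    static void
    trampoline(char const* payload, void* userData)
    {
        // move the callable into a local before invoking it, echoing the
        // defensive move done in invokeHelper above
        auto* fn = static_cast<FnType*>(userData);
        auto local = std::move(*fn);
        local(std::string{payload});
    }

    std::unique_ptr<FnType> cb_;
};

int
main()
{
    CallbackHolder holder{[](std::string s) { std::cout << "got: " << s << '\n'; }};
}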

View File

@@ -19,15 +19,17 @@
#pragma once
#include <backend/cassandra/Types.h>
#include <backend/cassandra/impl/ManagedObject.h>
#include "data/cassandra/Types.hpp"
#include "data/cassandra/impl/ManagedObject.hpp"
#include <cassandra.h>
namespace Backend::Cassandra::detail {
#include <functional>
#include <memory>
struct Future : public ManagedObject<CassFuture>
{
namespace data::cassandra::detail {
struct Future : public ManagedObject<CassFuture> {
/* implicit */ Future(CassFuture* ptr);
MaybeError
@@ -38,21 +40,20 @@ struct Future : public ManagedObject<CassFuture>
};
void
invokeHelper(CassFuture* ptr, void* self);
invokeHelper(CassFuture* ptr, void* cbPtr);
class FutureWithCallback : public Future
{
class FutureWithCallback : public Future {
public:
using fn_t = std::function<void(ResultOrError)>;
using fn_ptr_t = std::unique_ptr<fn_t>;
using FnType = std::function<void(ResultOrError)>;
using FnPtrType = std::unique_ptr<FnType>;
/* implicit */ FutureWithCallback(CassFuture* ptr, fn_t&& cb);
/* implicit */ FutureWithCallback(CassFuture* ptr, FnType&& cb);
FutureWithCallback(FutureWithCallback const&) = delete;
FutureWithCallback(FutureWithCallback&&) = default;
private:
/*! Wrapped in a unique_ptr so it can survive std::move :/ */
fn_ptr_t cb_;
/** Wrapped in a unique_ptr so it can survive std::move :/ */
FnPtrType cb_;
};
} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
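Editor's note on the "Wrapped in a unique_ptr so it can survive std::move" comment above: a by-value member is relocated together with its owner, so any raw pointer previously taken to it goes stale, whereas the object behind a unique_ptr keeps its address. A tiny, purely illustrative comparison (the struct names are made up):

#include <functional>
#include <iostream>
#include <memory>
#include <utility>

// Hypothetical structs demonstrating why the callback is stored behind a
// unique_ptr rather than by value.
struct ByValue {
    std::function<void()> cb = [] {};
};

struct ByPointer {
    std::unique_ptr<std::function<void()>> cb = std::make_unique<std::function<void()>>([] {});
};

int
main()
{
    ByValue a;
    auto* before = &a.cb;
    ByValue b = std::move(a);
    // the member now lives inside `b`, at a different address than before
    std::cout << (before == &b.cb ? "same" : "different") << '\n';  // prints "different"

    ByPointer c;
    auto* stable = c.cb.get();
    ByPointer d = std::move(c);
    // the pointed-to std::function did not move; a void* handed to a C API
    // that referred to it would still be valid
    std::cout << (stable == d.cb.get() ? "same" : "different") << '\n';  // prints "same"
}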

View File

@@ -20,12 +20,12 @@
#pragma once
#include <memory>
#include <stdexcept>
namespace Backend::Cassandra::detail {
namespace data::cassandra::detail {
template <typename Managed>
class ManagedObject
{
class ManagedObject {
protected:
std::unique_ptr<Managed, void (*)(Managed*)> ptr_;
@@ -36,12 +36,11 @@ public:
if (rawPtr == nullptr)
throw std::runtime_error("Could not create DB object - got nullptr");
}
ManagedObject(ManagedObject&&) = default;
operator Managed* const() const
operator Managed*() const
{
return ptr_.get();
}
};
} // namespace Backend::Cassandra::detail
} // namespace data::cassandra::detail
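Editor's note: the ManagedObject diff above keeps the same RAII shape, a unique_ptr with a function-pointer deleter plus an implicit conversion back to the raw handle so the wrapper can be passed straight into C driver calls. Below is a self-contained sketch of that shape against a made-up handle type; FakeHandle and its helper functions are hypothetical, and ManagedHandle is a simplified stand-in rather than the real class, which wraps driver types such as CassFuture.

// Minimal sketch of the ManagedObject idea: RAII ownership of a C handle via
// unique_ptr with a function-pointer deleter, plus implicit conversion back
// to the raw pointer for use with C APIs.
#include <cstdio>
#include <memory>
#include <stdexcept>

// Hypothetical C-style handle with a create/free/use trio, standing in for
// the driver types the real ManagedObject wraps.
struct FakeHandle {
    int id;
};
FakeHandle* fakeHandleNew() { return new FakeHandle{42}; }
void fakeHandleFree(FakeHandle* h) { delete h; }
void fakeHandleUse(FakeHandle* h) { std::printf("using handle %d\n", h->id); }

template <typename Managed>
class ManagedHandle {
protected:
    std::unique_ptr<Managed, void (*)(Managed*)> ptr_;

public:
    ManagedHandle(Managed* rawPtr, void (*deleter)(Managed*)) : ptr_{rawPtr, deleter}
    {
        if (rawPtr == nullptr)
            throw std::runtime_error("Could not create DB object - got nullptr");
    }
    ManagedHandle(ManagedHandle&&) = default;

    // implicit conversion so the wrapper can be handed to C APIs directly
    operator Managed*() const
    {
        return ptr_.get();
    }
};

int
main()
{
    ManagedHandle<FakeHandle> handle{fakeHandleNew(), fakeHandleFree};
    fakeHandleUse(handle);  // implicit conversion to FakeHandle*
}  // fakeHandleFree runs automatically when `handle` goes out of scope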

Some files were not shown because too many files have changed in this diff.