Mirror of https://github.com/XRPLF/clio.git (synced 2025-11-04 11:55:51 +00:00)

Compare commits: 0.1.1 ... release/2. (492 commits)
.clang-format (modified)

@@ -1,7 +1,7 @@
 ---
 Language: Cpp
 AccessModifierOffset: -4
-AlignAfterOpenBracket: AlwaysBreak
+AlignAfterOpenBracket: BlockIndent
 AlignConsecutiveAssignments: false
 AlignConsecutiveDeclarations: false
 AlignEscapedNewlinesLeft: true
@@ -18,23 +18,11 @@ AlwaysBreakBeforeMultilineStrings: true
 AlwaysBreakTemplateDeclarations: true
 BinPackArguments: false
 BinPackParameters: false
-BraceWrapping:
-  AfterClass: true
-  AfterControlStatement: true
-  AfterEnum: false
-  AfterFunction: true
-  AfterNamespace: false
-  AfterObjCDeclaration: true
-  AfterStruct: true
-  AfterUnion: true
-  BeforeCatch: true
-  BeforeElse: true
-  IndentBraces: false
 BreakBeforeBinaryOperators: false
-BreakBeforeBraces: Custom
+BreakBeforeBraces: WebKit
 BreakBeforeTernaryOperators: true
 BreakConstructorInitializersBeforeComma: true
-ColumnLimit: 80
+ColumnLimit: 120
 CommentPragmas: '^ IWYU pragma:'
 ConstructorInitializerAllOnOneLineOrOnePerLine: true
 ConstructorInitializerIndentWidth: 4
@@ -43,13 +31,15 @@ Cpp11BracedListStyle: true
 DerivePointerAlignment: false
 DisableFormat: false
 ExperimentalAutoDetectBinPacking: false
 FixNamespaceComments: true
 ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ]
+IncludeBlocks: Regroup
 IncludeCategories:
-  - Regex: '^<(BeastConfig)'
-    Priority: 0
-  - Regex: '^<(ripple)/'
+  - Regex: '^".*"$'
     Priority: 1
+  - Regex: '^<.*\.(h|hpp)>$'
+    Priority: 2
-  - Regex: '^<(boost)/'
+  - Regex: '^<.*>$'
     Priority: 3
   - Regex: '.*'
     Priority: 4
@@ -58,6 +48,8 @@ IndentCaseLabels: true
 IndentFunctionDeclarationAfterType: false
 IndentWidth: 4
 IndentWrappedFunctionNames: false
+IndentRequiresClause: true
+RequiresClausePosition: OwnLine
 KeepEmptyLinesAtTheStartOfBlocks: false
 MaxEmptyLinesToKeep: 1
 NamespaceIndentation: None
@@ -70,6 +62,7 @@ PenaltyBreakString: 1000
 PenaltyExcessCharacter: 1000000
 PenaltyReturnTypeOnItsOwnLine: 200
 PointerAlignment: Left
+QualifierAlignment: Right
 ReflowComments: true
 SortIncludes: true
 SpaceAfterCStyleCast: false
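With this configuration at the repository root, the pre-commit hook shown later drives clang-format 17 over `src` and `unittests`. A minimal sketch of the equivalent manual invocation, assuming a `clang-format-17` binary is on `PATH`:

```bash
# Re-format all C++ sources in place using the repository's .clang-format.
# The paths and file extensions mirror the .githooks/pre-commit script below.
find src unittests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.ipp' \) -print0 \
    | xargs -0 clang-format-17 -i
```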
.clang-tidy (new file, 133 lines)

@@ -0,0 +1,133 @@
---
Checks: '-*,
    bugprone-argument-comment,
    bugprone-assert-side-effect,
    bugprone-bad-signal-to-kill-thread,
    bugprone-bool-pointer-implicit-conversion,
    bugprone-copy-constructor-init,
    bugprone-dangling-handle,
    bugprone-dynamic-static-initializers,
    bugprone-empty-catch,
    bugprone-fold-init-type,
    bugprone-forward-declaration-namespace,
    bugprone-inaccurate-erase,
    bugprone-incorrect-roundings,
    bugprone-infinite-loop,
    bugprone-integer-division,
    bugprone-lambda-function-name,
    bugprone-macro-parentheses,
    bugprone-macro-repeated-side-effects,
    bugprone-misplaced-operator-in-strlen-in-alloc,
    bugprone-misplaced-pointer-arithmetic-in-alloc,
    bugprone-misplaced-widening-cast,
    bugprone-move-forwarding-reference,
    bugprone-multiple-new-in-one-expression,
    bugprone-multiple-statement-macro,
    bugprone-no-escape,
    bugprone-non-zero-enum-to-bool-conversion,
    bugprone-parent-virtual-call,
    bugprone-posix-return,
    bugprone-redundant-branch-condition,
    bugprone-reserved-identifier,
    bugprone-unused-return-value,
    bugprone-shared-ptr-array-mismatch,
    bugprone-signal-handler,
    bugprone-signed-char-misuse,
    bugprone-sizeof-container,
    bugprone-sizeof-expression,
    bugprone-spuriously-wake-up-functions,
    bugprone-standalone-empty,
    bugprone-string-constructor,
    bugprone-string-integer-assignment,
    bugprone-string-literal-with-embedded-nul,
    bugprone-stringview-nullptr,
    bugprone-suspicious-enum-usage,
    bugprone-suspicious-include,
    bugprone-suspicious-memory-comparison,
    bugprone-suspicious-memset-usage,
    bugprone-suspicious-missing-comma,
    bugprone-suspicious-realloc-usage,
    bugprone-suspicious-semicolon,
    bugprone-suspicious-string-compare,
    bugprone-swapped-arguments,
    bugprone-switch-missing-default-case,
    bugprone-terminating-continue,
    bugprone-throw-keyword-missing,
    bugprone-too-small-loop-variable,
    bugprone-undefined-memory-manipulation,
    bugprone-undelegated-constructor,
    bugprone-unhandled-exception-at-new,
    bugprone-unhandled-self-assignment,
    bugprone-unique-ptr-array-mismatch,
    bugprone-unsafe-functions,
    bugprone-unused-raii,
    bugprone-use-after-move,
    bugprone-virtual-near-miss,
    cppcoreguidelines-init-variables,
    cppcoreguidelines-misleading-capture-default-by-value,
    cppcoreguidelines-pro-type-member-init,
    cppcoreguidelines-pro-type-static-cast-downcast,
    cppcoreguidelines-rvalue-reference-param-not-moved,
    cppcoreguidelines-use-default-member-init,
    cppcoreguidelines-virtual-class-destructor,
    llvm-namespace-comment,
    misc-const-correctness,
    misc-definitions-in-headers,
    misc-header-include-cycle,
    misc-include-cleaner,
    misc-misplaced-const,
    misc-redundant-expression,
    misc-static-assert,
    misc-throw-by-value-catch-by-reference,
    misc-unused-alias-decls,
    misc-unused-using-decls,
    modernize-concat-nested-namespaces,
    modernize-deprecated-headers,
    modernize-make-shared,
    modernize-make-unique,
    modernize-pass-by-value,
    modernize-type-traits,
    modernize-use-emplace,
    modernize-use-equals-default,
    modernize-use-equals-delete,
    modernize-use-override,
    modernize-use-using,
    performance-faster-string-find,
    performance-for-range-copy,
    performance-implicit-conversion-in-loop,
    performance-inefficient-vector-operation,
    performance-move-const-arg,
    performance-move-constructor-init,
    performance-no-automatic-move,
    performance-trivially-destructible,
    readability-avoid-const-params-in-decls,
    readability-braces-around-statements,
    readability-const-return-type,
    readability-container-contains,
    readability-container-size-empty,
    readability-convert-member-functions-to-static,
    readability-duplicate-include,
    readability-else-after-return,
    readability-implicit-bool-conversion,
    readability-inconsistent-declaration-parameter-name,
    readability-make-member-function-const,
    readability-misleading-indentation,
    readability-non-const-parameter,
    readability-redundant-declaration,
    readability-redundant-member-init,
    readability-redundant-string-init,
    readability-simplify-boolean-expr,
    readability-static-accessed-through-instance,
    readability-static-definition-in-anonymous-namespace,
    readability-suspicious-call-argument
'

CheckOptions:
    readability-braces-around-statements.ShortStatementLines: 2
    bugprone-unsafe-functions.ReportMoreUnsafeFunctions: true
    bugprone-unused-return-value.CheckedReturnTypes: ::std::error_code;::std::error_condition;::std::errc;::std::expected
    misc-include-cleaner.IgnoreHeaders: '.*/(detail|impl)/.*'

HeaderFilterRegex: '^.*/(src|unittests)/.*\.(h|hpp)$'
WarningsAsErrors: '*'
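The clang-tidy workflow further down runs these checks over the whole compile database. A minimal local sketch of the same invocation, assuming a configured `build/` directory containing `compile_commands.json` and clang-tidy 17 installed:

```bash
# Run all checks from .clang-tidy against the CMake compile database,
# applying fixes, as the clang-tidy workflow below does in CI.
run-clang-tidy-17 -p build -j "$(nproc)" -fix -quiet
```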
.clangd (new file, 5 lines)

@@ -0,0 +1,5 @@
Diagnostics:
  UnusedIncludes: Strict
  MissingIncludes: Strict
  Includes:
    IgnoreHeader: ".*/(detail|impl)/.*"
.codecov.yml (new file, 11 lines)

@@ -0,0 +1,11 @@
coverage:
  status:
    project:
      default:
        target: 50%
        threshold: 2%

    patch:
      default:
        target: 20% # Need to bump this number https://docs.codecov.com/docs/commit-status#patch-status
        threshold: 2%
.dockerignore (new file, 1 line)

@@ -0,0 +1 @@
build/
@@ -6,3 +6,5 @@

 # clang-format
 e41150248a97e4bdc1cf21b54650c4bb7c63928e
+2e542e7b0d94451a933c88778461cc8d3d7e6417
+d816ef54abd8e8e979b9c795bdb657a8d18f5e95
.githooks/pre-commit (executable file, 77 lines)

@@ -0,0 +1,77 @@
#!/bin/bash

exec 1>&2

# paths to check and re-format
sources="src unittests"
formatter="clang-format -i"
version=$($formatter --version | grep -o '[0-9\.]*')

if [[ "17.0.0" > "$version" ]]; then
    cat <<EOF

ERROR
-----------------------------------------------------------------------------
A minimum of version 17 of `which clang-format` is required.
Your version is $version.
Please fix paths and run again.
-----------------------------------------------------------------------------

EOF
    exit 3
fi

# check there are no .h headers, only .hpp
wrong_headers=$(find $sources -name "*.h" | sed 's/^/ - /')
if [[ ! -z "$wrong_headers" ]]; then
    cat <<EOF

ERROR
-----------------------------------------------------------------------------
Found .h headers in the source code. Please rename them to .hpp:

$wrong_headers
-----------------------------------------------------------------------------

EOF
    exit 2
fi

function grep_code {
    grep -l "${1}" ${sources} -r --include \*.hpp --include \*.cpp
}

if [[ "$OSTYPE" == "darwin"* ]]; then
    # make all includes to be <...> style
    grep_code '#include ".*"' | xargs sed -i '' -E 's|#include "(.*)"|#include <\1>|g'

    # make local includes to be "..." style
    main_src_dirs=$(find ./src -maxdepth 1 -type d -exec basename {} \; | tr '\n' '|' | sed 's/|$//' | sed 's/|/\\|/g')
    grep_code "#include <\($main_src_dirs\)/.*>" | xargs sed -i '' -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g"
else
    # make all includes to be <...> style
    grep_code '#include ".*"' | xargs sed -i -E 's|#include "(.*)"|#include <\1>|g'

    # make local includes to be "..." style
    main_src_dirs=$(find ./src -type d -maxdepth 1 -exec basename {} \; | paste -sd '|' | sed 's/|/\\|/g')
    grep_code "#include <\($main_src_dirs\)/.*>" | xargs sed -i -E "s|#include <(($main_src_dirs)/.*)>|#include \"\1\"|g"
fi

first=$(git diff $sources)
find $sources -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.ipp' \) -print0 | xargs -0 $formatter
second=$(git diff $sources)
changes=$(diff <(echo "$first") <(echo "$second") | wc -l | sed -e 's/^[[:space:]]*//')

if [ "$changes" != "0" ]; then
    cat <<\EOF

WARNING
-----------------------------------------------------------------------------
Automatically re-formatted code with `clang-format` - commit was aborted.
Please manually add any updated files and commit again.
-----------------------------------------------------------------------------

EOF
    exit 1
fi
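For this hook to run, git has to be pointed at the `.githooks` directory. A minimal sketch, assuming the repository relies on git's standard `core.hooksPath` mechanism rather than a symlink into `.git/hooks`:

```bash
# Tell git to pick up hooks from .githooks instead of .git/hooks,
# and make sure the pre-commit script stays executable.
git config core.hooksPath .githooks
chmod +x .githooks/pre-commit
```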
.github/actions/build_clio/action.yml (vendored, new file, 18 lines)

@@ -0,0 +1,18 @@
name: Build clio
description: Build clio in build directory
inputs:
  target:
    description: Build target name
    default: all
runs:
  using: composite
  steps:
    - name: Get number of threads
      uses: ./.github/actions/get_number_of_threads
      id: number_of_threads

    - name: Build Clio
      shell: bash
      run: |
        cd build
        cmake --build . --parallel ${{ steps.number_of_threads.outputs.threads_number }} --target ${{ inputs.target }}
.github/actions/clang_format/action.yml (vendored, new file, 36 lines)

@@ -0,0 +1,36 @@
name: Check format
description: Check format using clang-format-17
runs:
  using: composite
  steps:
    - name: Add llvm repo
      run: |
        echo 'deb http://apt.llvm.org/focal/ llvm-toolchain-focal-17 main' | sudo tee -a /etc/apt/sources.list
        wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
      shell: bash

    - name: Install packages
      run: |
        sudo apt update -qq
        sudo apt install -y jq clang-format-17
        sudo rm /usr/bin/clang-format
        sudo ln -s /usr/bin/clang-format-17 /usr/bin/clang-format
      shell: bash

    - name: Run formatter
      continue-on-error: true
      id: run_formatter
      run: |
        ./.githooks/pre-commit
      shell: bash

    - name: Check for differences
      id: assert
      shell: bash
      run: |
        git diff --color --exit-code | tee "clang-format.patch"

    - name: Fail job
      if: ${{ steps.run_formatter.outcome != 'success' }}
      shell: bash
      run: exit 1
.github/actions/code_coverage/action.yml (vendored, new file, 33 lines)

@@ -0,0 +1,33 @@
name: Generate code coverage report
description: Run tests, generate code coverage report and upload it to codecov.io
runs:
  using: composite
  steps:
    - name: Run tests
      shell: bash
      run: |
        build/clio_tests --backend_host=scylladb

    - name: Run gcovr
      shell: bash
      run: |
        gcovr -e unittests --xml build/coverage_report.xml -j8 --exclude-throw-branches

    - name: Archive coverage report
      uses: actions/upload-artifact@v3
      with:
        name: coverage-report.xml
        path: build/coverage_report.xml
        retention-days: 30

    - name: Upload coverage report
      uses: wandalen/wretry.action@v1.3.0
      with:
        action: codecov/codecov-action@v3
        with: |
          files: build/coverage_report.xml
          fail_ci_if_error: true
          verbose: true
          token: ${{ env.CODECOV_TOKEN }}
        attempt_limit: 5
        attempt_delay: 10000
.github/actions/generate/action.yml (vendored, new file, 41 lines)

@@ -0,0 +1,41 @@
name: Run conan and cmake
description: Run conan and cmake
inputs:
  conan_profile:
    description: Conan profile name
    required: true
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
    default: 'false'
  build_type:
    description: Build type for third-party libraries and clio. Could be 'Release', 'Debug'
    required: true
    default: 'Release'
  code_coverage:
    description: Whether conan's coverage option should be on or not
    required: true
    default: 'false'
runs:
  using: composite
  steps:
    - name: Create build directory
      shell: bash
      run: mkdir -p build

    - name: Run conan
      shell: bash
      env:
        BUILD_OPTION: "${{ inputs.conan_cache_hit == 'true' && 'missing' || '' }}"
        CODE_COVERAGE: "${{ inputs.code_coverage == 'true' && 'True' || 'False' }}"
      run: |
        cd build
        conan install .. -of . -b $BUILD_OPTION -s build_type=${{ inputs.build_type }} -o clio:tests=True -o clio:lint=False -o clio:coverage="${CODE_COVERAGE}" --profile ${{ inputs.conan_profile }}

    - name: Run cmake
      shell: bash
      env:
        BUILD_TYPE: "${{ inputs.build_type }}"
      run: |
        cd build
        cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=${{ inputs.build_type }} ${{ inputs.extra_cmake_args }} .. -G Ninja
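The same generate step can be reproduced outside CI. A minimal sketch, assuming conan 1.x with a `default` profile and Ninja installed (the flags are taken from the action above, with coverage left off):

```bash
# Roughly what the generate action runs, for a local Release build.
mkdir -p build && cd build
conan install .. -of . -b missing -s build_type=Release \
    -o clio:tests=True -o clio:lint=False --profile default
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
    -DCMAKE_BUILD_TYPE=Release .. -G Ninja
cmake --build . --parallel "$(nproc)"
```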
.github/actions/get_number_of_threads/action.yml (vendored, new file, 26 lines)

@@ -0,0 +1,26 @@
name: Get number of threads
description: Determines number of threads to use on macOS and Linux
outputs:
  threads_number:
    description: Number of threads to use
    value: ${{ steps.number_of_threads_export.outputs.num }}
runs:
  using: composite
  steps:
    - name: Get number of threads on mac
      id: mac_threads
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: echo "num=$(($(sysctl -n hw.logicalcpu) - 2))" >> $GITHUB_OUTPUT

    - name: Get number of threads on Linux
      id: linux_threads
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: echo "num=$(($(nproc) - 2))" >> $GITHUB_OUTPUT

    - name: Export output variable
      shell: bash
      id: number_of_threads_export
      run: |
        echo "num=${{ steps.mac_threads.outputs.num || steps.linux_threads.outputs.num }}" >> $GITHUB_OUTPUT
.github/actions/git_common_ancestor/action.yml (vendored, new file, 14 lines)

@@ -0,0 +1,14 @@
name: Git common ancestor
description: Find the closest common commit
outputs:
  commit:
    description: Hash of commit
    value: ${{ steps.find_common_ancestor.outputs.commit }}
runs:
  using: composite
  steps:
    - name: Find common git ancestor
      id: find_common_ancestor
      shell: bash
      run: |
        echo "commit=$(git merge-base --fork-point origin/develop)" >> $GITHUB_OUTPUT
.github/actions/prepare_runner/action.yml (vendored, new file, 47 lines)

@@ -0,0 +1,47 @@
name: Prepare runner
description: Install packages, set environment variables, create directories
inputs:
  disable_ccache:
    description: Whether ccache should be disabled
    required: true
runs:
  using: composite
  steps:
    - name: Install packages on mac
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: |
        brew install llvm@14 pkg-config ninja bison cmake ccache jq gh

    - name: Fix git permissions on Linux
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: git config --global --add safe.directory $PWD

    - name: Set env variables for macOS
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      run: |
        echo "CCACHE_DIR=${{ github.workspace }}/.ccache" >> $GITHUB_ENV
        echo "CONAN_USER_HOME=${{ github.workspace }}" >> $GITHUB_ENV

    - name: Set env variables for Linux
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      run: |
        echo "CCACHE_DIR=/root/.ccache" >> $GITHUB_ENV
        echo "CONAN_USER_HOME=/root/" >> $GITHUB_ENV

    - name: Set CCACHE_DISABLE=1
      if: ${{ inputs.disable_ccache == 'true' }}
      shell: bash
      run: |
        echo "CCACHE_DISABLE=1" >> $GITHUB_ENV

    - name: Create directories
      shell: bash
      run: |
        mkdir -p $CCACHE_DIR
        mkdir -p $CONAN_USER_HOME/.conan
.github/actions/restore_cache/action.yml (vendored, new file, 59 lines)

@@ -0,0 +1,59 @@
name: Restore cache
description: Find and restore conan and ccache cache
inputs:
  conan_dir:
    description: Path to .conan directory
    required: true
  ccache_dir:
    description: Path to .ccache directory
    required: true
  build_type:
    description: Current build type (e.g. Release, Debug)
    required: true
    default: Release
  code_coverage:
    description: Whether code coverage is on
    required: true
    default: 'false'
outputs:
  conan_hash:
    description: Hash to use as a part of conan cache key
    value: ${{ steps.conan_hash.outputs.hash }}
  conan_cache_hit:
    description: True if conan cache has been downloaded
    value: ${{ steps.conan_cache.outputs.cache-hit }}
  ccache_cache_hit:
    description: True if ccache cache has been downloaded
    value: ${{ steps.ccache_cache.outputs.cache-hit }}
runs:
  using: composite
  steps:
    - name: Find common commit
      id: git_common_ancestor
      uses: ./.github/actions/git_common_ancestor

    - name: Calculate conan hash
      id: conan_hash
      shell: bash
      run: |
        conan info . -j info.json -o clio:tests=True
        packages_info=$(cat info.json | jq '.[] | "\(.display_name): \(.id)"' | grep -v 'clio')
        echo "$packages_info"
        hash=$(echo "$packages_info" | shasum -a 256 | cut -d ' ' -f 1)
        rm info.json
        echo "hash=$hash" >> $GITHUB_OUTPUT

    - name: Restore conan cache
      uses: actions/cache/restore@v3
      id: conan_cache
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-develop-${{ steps.conan_hash.outputs.hash }}

    - name: Restore ccache cache
      uses: actions/cache/restore@v3
      id: ccache_cache
      if: ${{ env.CCACHE_DISABLE != '1' }}
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/save_cache/action.yml (vendored, new file, 56 lines)

@@ -0,0 +1,56 @@
name: Save cache
description: Save conan and ccache cache for develop branch
inputs:
  conan_dir:
    description: Path to .conan directory
    required: true
  conan_hash:
    description: Hash to use as a part of conan cache key
    required: true
  conan_cache_hit:
    description: Whether conan cache has been downloaded
    required: true
  ccache_dir:
    description: Path to .ccache directory
    required: true
  ccache_cache_hit:
    description: Whether ccache cache has been downloaded
    required: true
  ccache_cache_miss_rate:
    description: How many cache misses happened
  build_type:
    description: Current build type (e.g. Release, Debug)
    required: true
    default: Release
  code_coverage:
    description: Whether code coverage is on
    required: true
    default: 'false'
runs:
  using: composite
  steps:
    - name: Find common commit
      id: git_common_ancestor
      uses: ./.github/actions/git_common_ancestor

    - name: Cleanup conan directory from extra data
      if: ${{ inputs.conan_cache_hit != 'true' }}
      shell: bash
      run: |
        conan remove "*" -s -b -f

    - name: Save conan cache
      if: ${{ inputs.conan_cache_hit != 'true' }}
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.conan_dir }}/data
        key: clio-conan_data-${{ runner.os }}-${{ inputs.build_type }}-develop-${{ inputs.conan_hash }}

    - name: Save ccache cache
      if: ${{ inputs.ccache_cache_hit != 'true' || inputs.ccache_cache_miss_rate == '100.0' }}
      uses: actions/cache/save@v3
      with:
        path: ${{ inputs.ccache_dir }}
        key: clio-ccache-${{ runner.os }}-${{ inputs.build_type }}${{ inputs.code_coverage == 'true' && '-code_coverage' || '' }}-develop-${{ steps.git_common_ancestor.outputs.commit }}
.github/actions/setup_conan/action.yml (vendored, new file, 52 lines)

@@ -0,0 +1,52 @@
name: Setup conan
description: Setup conan profile and artifactory
outputs:
  conan_profile:
    description: Created conan profile name
    value: ${{ steps.conan_export_output.outputs.conan_profile }}
runs:
  using: composite
  steps:
    - name: On mac
      if: ${{ runner.os == 'macOS' }}
      shell: bash
      env:
        CONAN_PROFILE: clio_clang_14
      id: conan_setup_mac
      run: |
        echo "Creating $CONAN_PROFILE conan profile";
        clang_path="$(brew --prefix llvm@14)/bin/clang"
        clang_cxx_path="$(brew --prefix llvm@14)/bin/clang++"
        conan profile new $CONAN_PROFILE --detect --force
        conan profile update settings.compiler=clang $CONAN_PROFILE
        conan profile update settings.compiler.version=14 $CONAN_PROFILE
        conan profile update settings.compiler.cppstd=20 $CONAN_PROFILE
        conan profile update "conf.tools.build:compiler_executables={\"c\": \"$clang_path\", \"cpp\": \"$clang_cxx_path\"}" $CONAN_PROFILE
        conan profile update env.CC="$clang_path" $CONAN_PROFILE
        conan profile update env.CXX="$clang_cxx_path" $CONAN_PROFILE
        echo "created_conan_profile=$CONAN_PROFILE" >> $GITHUB_OUTPUT

    - name: On linux
      if: ${{ runner.os == 'Linux' }}
      shell: bash
      id: conan_setup_linux
      run: |
        echo "created_conan_profile=default" >> $GITHUB_OUTPUT

    - name: Export output variable
      shell: bash
      id: conan_export_output
      run: |
        echo "conan_profile=${{ steps.conan_setup_mac.outputs.created_conan_profile || steps.conan_setup_linux.outputs.created_conan_profile }}" >> $GITHUB_OUTPUT

    - name: Add conan-non-prod artifactory
      shell: bash
      run: |
        if [[ -z $(conan remote list | grep conan-non-prod) ]]; then
          echo "Adding conan-non-prod"
          conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        else
          echo "Conan-non-prod is available"
        fi
.github/actions/test/Dockerfile (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
FROM cassandra:4.0.4

RUN apt-get update && apt-get install -y postgresql
COPY entrypoint.sh /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]
.github/actions/test/entrypoint.sh (vendored, executable file, 8 lines)

@@ -0,0 +1,8 @@
#!/bin/bash

pg_ctlcluster 12 main start
su postgres -c"psql -c\"alter user postgres with password 'postgres'\""
su cassandra -c "/opt/cassandra/bin/cassandra -R"
sleep 90
chmod +x ./clio_tests
./clio_tests
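A sketch of how this image might be exercised locally; the `clio-test-db` tag and the bind-mount location for a prebuilt `clio_tests` binary are illustrative, not taken from the repo:

```bash
# Build the Cassandra+Postgres test image and run a prebuilt clio_tests
# binary inside it; entrypoint.sh starts both databases before the tests.
docker build -t clio-test-db .github/actions/test
docker run --rm -v "$PWD/build/clio_tests:/clio_tests" -w / clio-test-db
```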
.github/workflows/build.yml (vendored, modified, 168 lines)

@@ -1,43 +1,151 @@
-name: Build Clio
+name: Build
 on:
   push:
-    branches: [master, develop]
+    branches: [master, release/*, develop]
   pull_request:
-    branches: [master, develop]
+    branches: [master, release/*, develop]
   workflow_dispatch:

 jobs:
   lint:
-    name: Lint
+    name: Check format
     runs-on: ubuntu-20.04
     steps:
-      - name: Get source
-        uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Run clang-format
-        uses: XRPLF/clio-build/lint@main
+        uses: ./.github/actions/clang_format

-  build_clio:
-    runs-on: ubuntu-20.04
+  build:
+    name: Build
     needs: lint
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - os: heavy
+            container:
+              image: rippleci/clio_ci:latest
+            build_type: Release
+            code_coverage: false
+          - os: heavy
+            container:
+              image: rippleci/clio_ci:latest
+            build_type: Debug
+            code_coverage: true
+          # - os: macOS
+          #   build_type: Release
+          #   code_coverage: false
+    runs-on: [self-hosted, "${{ matrix.os }}"]
+    container: ${{ matrix.container }}
+
+    services:
+      scylladb:
+        image: ${{ (matrix.code_coverage) && 'scylladb/scylla' || '' }}
+        options: >-
+          --health-cmd "cqlsh -e 'describe cluster'"
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5

     steps:
-      - name: Get clio repo
-        uses: actions/checkout@v3
+      - uses: actions/checkout@v4
         with:
-          path: clio_src
-
-      - name: Get gha repo
-        uses: actions/checkout@v3
-        with:
-          repository: XRPLF/clio-build
-          ref: main
-          path: gha # must be the same as defined in XRPLF/clio-build
-
-      - name: Build
-        uses: XRPLF/clio-build/build@main
-
-      # - name: Artifact clio_tests
-      #   uses: actions/upload-artifact@v2
-      #   with:
-      #     name: clio_output
-      #     path: clio_src/build/clio_tests
+          fetch-depth: 0
+
+      - name: Prepare runner
+        uses: ./.github/actions/prepare_runner
+        with:
+          disable_ccache: false
+
+      - name: Setup conan
+        uses: ./.github/actions/setup_conan
+        id: conan
+
+      - name: Restore cache
+        uses: ./.github/actions/restore_cache
+        id: restore_cache
+        with:
+          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
+          ccache_dir: ${{ env.CCACHE_DIR }}
+          build_type: ${{ matrix.build_type }}
+          code_coverage: ${{ matrix.code_coverage }}
+
+      - name: Run conan and cmake
+        uses: ./.github/actions/generate
+        with:
+          conan_profile: ${{ steps.conan.outputs.conan_profile }}
+          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
+          build_type: ${{ matrix.build_type }}
+          code_coverage: ${{ matrix.code_coverage }}
+
+      - name: Build Clio
+        uses: ./.github/actions/build_clio
+
+      - name: Show ccache's statistics
+        shell: bash
+        id: ccache_stats
+        run: |
+          ccache -s > /tmp/ccache.stats
+          miss_rate=$(cat /tmp/ccache.stats | grep 'Misses' | head -n1 | sed 's/.*(\(.*\)%).*/\1/')
+          echo "miss_rate=${miss_rate}" >> $GITHUB_OUTPUT
+          cat /tmp/ccache.stats
+
+      - name: Strip tests
+        if: ${{ !matrix.code_coverage }}
+        run: strip build/clio_tests
+
+      - name: Upload clio_server
+        uses: actions/upload-artifact@v3
+        with:
+          name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
+          path: build/clio_server
+
+      - name: Upload clio_tests
+        if: ${{ !matrix.code_coverage }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: clio_tests_${{ runner.os }}
+          path: build/clio_tests
+
+      - name: Save cache
+        uses: ./.github/actions/save_cache
+        with:
+          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
+          conan_hash: ${{ steps.restore_cache.outputs.conan_hash }}
+          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
+          ccache_dir: ${{ env.CCACHE_DIR }}
+          ccache_cache_hit: ${{ steps.restore_cache.outputs.ccache_cache_hit }}
+          ccache_cache_miss_rate: ${{ steps.ccache_stats.outputs.miss_rate }}
+          build_type: ${{ matrix.build_type }}
+          code_coverage: ${{ matrix.code_coverage }}
+
+      # TODO: This is not a part of build process but it is the easiest way to do it here.
+      # It will be refactored in https://github.com/XRPLF/clio/issues/1075
+      - name: Run code coverage
+        if: ${{ matrix.code_coverage }}
+        env:
+          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+        uses: ./.github/actions/code_coverage
+
+  test:
+    name: Run Tests
+    needs: build
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - os: heavy
+            container:
+              image: rippleci/clio_ci:latest
+          # - os: macOS
+    runs-on: [self-hosted, "${{ matrix.os }}"]
+    container: ${{ matrix.container }}
+
+    steps:
+      - uses: actions/download-artifact@v3
+        with:
+          name: clio_tests_${{ runner.os }}
+      - name: Run clio_tests
+        run: |
+          chmod +x ./clio_tests
+          ./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"
.github/workflows/clang-tidy.yml (vendored, new file, 116 lines)

@@ -0,0 +1,116 @@
name: Clang-tidy check
on:
  schedule:
    - cron: "0 6 * * 1-5"
  workflow_dispatch:
  pull_request:
    branches: [develop]
    paths:
      - .clang_tidy
      - .github/workflows/clang-tidy.yml
  workflow_call:

jobs:
  clang_tidy:
    runs-on: [self-hosted, Linux]
    container:
      image: rippleci/clio_ci:latest
    permissions:
      contents: write
      issues: write
      pull-requests: write

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Prepare runner
        uses: ./.github/actions/prepare_runner
        with:
          disable_ccache: true

      - name: Setup conan
        uses: ./.github/actions/setup_conan
        id: conan

      - name: Restore cache
        uses: ./.github/actions/restore_cache
        id: restore_cache
        with:
          conan_dir: ${{ env.CONAN_USER_HOME }}/.conan
          ccache_dir: ${{ env.CCACHE_DIR }}

      - name: Run conan and cmake
        uses: ./.github/actions/generate
        with:
          conan_profile: ${{ steps.conan.outputs.conan_profile }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          build_type: Release

      - name: Get number of threads
        uses: ./.github/actions/get_number_of_threads
        id: number_of_threads

      - name: Run clang-tidy
        continue-on-error: true
        shell: bash
        id: run_clang_tidy
        run: |
          run-clang-tidy-17 -p build -j ${{ steps.number_of_threads.outputs.threads_number }} -fix -quiet 1>output.txt

      - name: Run pre-commit hook
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        continue-on-error: true
        shell: bash
        run: ./.githooks/pre-commit

      - name: Print issues found
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        shell: bash
        run: |
          sed -i '/error\||/!d' ./output.txt
          cat output.txt
          rm output.txt

      - name: Create an issue
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        id: create_issue
        shell: bash
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          echo -e 'Clang-tidy found issues in the code:\n' > issue.md
          echo -e "List of the issues found: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}/" >> issue.md
          gh issue create --assignee 'cindyyan317,godexsoft,kuznetsss' --label bug --title 'Clang-tidy found bugs in code🐛' --body-file ./issue.md > create_issue.log
          created_issue=$(cat create_issue.log | sed 's|.*/||')
          echo "created_issue=$created_issue" >> $GITHUB_OUTPUT
          rm create_issue.log issue.md

      - uses: crazy-max/ghaction-import-gpg@v5
        with:
          gpg_private_key: ${{ secrets.ACTIONS_GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.ACTIONS_GPG_PASSPHRASE }}
          git_user_signingkey: true
          git_commit_gpgsign: true

      - name: Create PR with fixes
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        uses: peter-evans/create-pull-request@v5
        env:
          GH_REPO: ${{ github.repository }}
          GH_TOKEN: ${{ github.token }}
        with:
          commit-message: "[CI] clang-tidy auto fixes"
          committer: Clio CI <skuznetsov@ripple.com>
          branch: "clang_tidy/autofix"
          branch-suffix: timestamp
          delete-branch: true
          title: "[CI] clang-tidy auto fixes"
          body: "Fixes #${{ steps.create_issue.outputs.created_issue }}. Please review and commit clang-tidy fixes."
          reviewers: "cindyyan317,godexsoft,kuznetsss"

      - name: Fail the job
        if: ${{ steps.run_clang_tidy.outcome != 'success' }}
        shell: bash
        run: exit 1
.github/workflows/clang-tidy_on_fix_merged.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
name: Restart clang-tidy workflow
on:
  push:
    branches: [develop]
  workflow_dispatch:

jobs:
  restart_clang_tidy:
    runs-on: ubuntu-20.04

    permissions:
      actions: write

    steps:
      - uses: actions/checkout@v4
      - name: Check last commit matches clang-tidy auto fixes
        id: check
        shell: bash
        run: |
          passed=$(if [[ $(git log -1 --pretty=format:%s | grep '\[CI\] clang-tidy auto fixes') ]]; then echo 'true' ; else echo 'false' ; fi)
          echo "passed=$passed" >> $GITHUB_OUTPUT

      - name: Run clang-tidy workflow
        if: ${{ contains(steps.check.outputs.passed, 'true') }}
        shell: bash
        env:
          GH_TOKEN: ${{ github.token }}
          GH_REPO: ${{ github.repository }}
        run: gh workflow run clang-tidy.yml
.github/workflows/nightly.yml (vendored, new file, 138 lines)

@@ -0,0 +1,138 @@
name: Nightly release
on:
  schedule:
    - cron: '0 5 * * 1-5'
  workflow_dispatch:

jobs:
  build:
    name: Build clio
    strategy:
      fail-fast: false
      matrix:
        include:
          # - os: macOS
          #   build_type: Release
          - os: heavy
            build_type: Release
            container:
              image: rippleci/clio_ci:latest
          - os: heavy
            build_type: Debug
            container:
              image: rippleci/clio_ci:latest
    runs-on: [self-hosted, "${{ matrix.os }}"]
    container: ${{ matrix.container }}

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Prepare runner
        uses: ./.github/actions/prepare_runner
        with:
          disable_ccache: true

      - name: Setup conan
        uses: ./.github/actions/setup_conan
        id: conan

      - name: Run conan and cmake
        uses: ./.github/actions/generate
        with:
          conan_profile: ${{ steps.conan.outputs.conan_profile }}
          conan_cache_hit: ${{ steps.restore_cache.outputs.conan_cache_hit }}
          build_type: ${{ matrix.build_type }}

      - name: Build Clio
        uses: ./.github/actions/build_clio

      - name: Strip tests
        run: strip build/clio_tests

      - name: Upload clio_tests
        uses: actions/upload-artifact@v3
        with:
          name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}
          path: build/clio_tests

      - name: Compress clio_server
        shell: bash
        run: |
          cd build
          tar czf ./clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz ./clio_server

      - name: Upload clio_server
        uses: actions/upload-artifact@v3
        with:
          name: clio_server_${{ runner.os }}_${{ matrix.build_type }}
          path: build/clio_server_${{ runner.os }}_${{ matrix.build_type }}.tar.gz

  run_tests:
    needs: build
    strategy:
      fail-fast: false
      matrix:
        include:
          # - os: macOS
          #   build_type: Release
          - os: heavy
            build_type: Release
          - os: heavy
            build_type: Debug
    runs-on: [self-hosted, "${{ matrix.os }}"]

    steps:
      - uses: actions/download-artifact@v3
        with:
          name: clio_tests_${{ runner.os }}_${{ matrix.build_type }}

      - name: Run clio_tests
        run: |
          chmod +x ./clio_tests
          ./clio_tests --gtest_filter="-BackendCassandraBaseTest*:BackendCassandraTest*:BackendCassandraFactoryTestWithDB*"

  nightly_release:
    needs: run_tests
    runs-on: ubuntu-20.04
    env:
      GH_REPO: ${{ github.repository }}
      GH_TOKEN: ${{ github.token }}
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v4

      - uses: actions/download-artifact@v3
        with:
          path: nightly_release

      - name: Prepare files
        shell: bash
        run: |
          cp ${{ github.workspace }}/.github/workflows/nightly_notes.md "${RUNNER_TEMP}/nightly_notes.md"
          cd nightly_release
          rm -r clio_tests*
          for d in $(ls); do
            archive_name=$(ls $d)
            mv ${d}/${archive_name} ./
            rm -r $d
            sha256sum ./$archive_name > ./${archive_name}.sha256sum
            cat ./$archive_name.sha256sum >> "${RUNNER_TEMP}/nightly_notes.md"
          done
          echo '```' >> "${RUNNER_TEMP}/nightly_notes.md"

      - name: Remove current nightly release and nightly tag
        shell: bash
        run: |
          gh release delete nightly --yes || true
          git push origin :nightly || true

      - name: Publish nightly release
        shell: bash
        run: |
          gh release create nightly --prerelease --title "Clio development (nightly) build" \
            --target $GITHUB_SHA --notes-file "${RUNNER_TEMP}/nightly_notes.md" \
            ./nightly_release/clio_server*
.github/workflows/nightly_notes.md (vendored, new file, 6 lines)

@@ -0,0 +1,6 @@
> **Note:** Please remember that this is a development release and it is not recommended for production use.

Changelog (including previous releases): https://github.com/XRPLF/clio/commits/nightly

## SHA256 checksums
```
.github/workflows/update_docker_ci.yml (vendored, new file, 40 lines)

@@ -0,0 +1,40 @@
name: Update CI docker image
on:
  push:
    branches: [develop]
    paths:
      - 'docker/ci/**'
      - .github/workflows/update_docker_ci.yml
  workflow_dispatch:

jobs:
  build_and_push:
    name: Build and push docker image
    runs-on: ubuntu-20.04
    steps:
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_PW }}

      - uses: actions/checkout@v4
      - uses: docker/setup-qemu-action@v3
      - uses: docker/setup-buildx-action@v3
      - uses: docker/metadata-action@v5
        id: meta
        with:
          images: rippleci/clio_ci
          tags: |
            type=raw,value=latest
            type=raw,value=gcc_11
            type=raw,value=${{ env.GITHUB_SHA }}

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: ${{ github.workspace }}/docker/ci
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
.gitignore (vendored, modified)

@@ -1,2 +1,10 @@
-build/
+*clio*.log
+/build*/
+.devcontainer
+.build
+.cache
+.vscode
+.python-version
+CMakeUserPresets.json
 config.json
+src/main/impl/Build.cpp
CMake/Build.cpp.in (new file, 41 lines)

@@ -0,0 +1,41 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "main/Build.hpp"

#include <string>

namespace Build {
static constexpr char versionString[] = "@VERSION@";

std::string const&
getClioVersionString()
{
    static std::string const value = versionString;
    return value;
}

std::string const&
getClioFullVersionString()
{
    static std::string const value = "clio-" + getClioVersionString();
    return value;
}

}  // namespace Build
CMake/Ccache.cmake (new file, 5 lines)

@@ -0,0 +1,5 @@
find_program (CCACHE_PATH "ccache")
if (CCACHE_PATH)
  set (CMAKE_CXX_COMPILER_LAUNCHER "${CCACHE_PATH}")
  message (STATUS "Using ccache: ${CCACHE_PATH}")
endif ()
42 CMake/CheckCompiler.cmake Normal file
@@ -0,0 +1,42 @@
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
        message (FATAL_ERROR "Clang 14+ required for building clio")
    endif ()
    set (is_clang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14)
        message (FATAL_ERROR "AppleClang 14+ required for building clio")
    endif ()
    set (is_appleclang TRUE)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11)
        message (FATAL_ERROR "GCC 11+ required for building clio")
    endif ()
    set (is_gcc TRUE)
else ()
    message (FATAL_ERROR "Supported compilers: AppleClang 14+, Clang 14+, GCC 11+")
endif ()

if (san)
    string (TOLOWER ${san} san)
    set (SAN_FLAG "-fsanitize=${san}")
    set (SAN_LIB "")
    if (is_gcc)
        if (san STREQUAL "address")
            set (SAN_LIB "asan")
        elseif (san STREQUAL "thread")
            set (SAN_LIB "tsan")
        elseif (san STREQUAL "memory")
            set (SAN_LIB "msan")
        elseif (san STREQUAL "undefined")
            set (SAN_LIB "ubsan")
        endif ()
    endif ()
    set (_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
    set (CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
    CHECK_CXX_COMPILER_FLAG (${SAN_FLAG} COMPILER_SUPPORTS_SAN)
    set (CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
    if (NOT COMPILER_SUPPORTS_SAN)
        message (FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
    endif ()
endif ()
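As a usage sketch (compiler version illustrative), the `san` cache variable defined in CMakeLists.txt drives the check above:

``` bash
# Request AddressSanitizer instrumentation; CheckCompiler.cmake verifies
# that -fsanitize=address is actually supported before the build proceeds.
cmake -B build -DCMAKE_CXX_COMPILER=clang++-14 -Dsan=address .
```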
31 CMake/ClangTidy.cmake Normal file
@@ -0,0 +1,31 @@
if (lint)

    # Find clang-tidy binary
    if (DEFINED ENV{CLIO_CLANG_TIDY_BIN})
        set (_CLANG_TIDY_BIN $ENV{CLIO_CLANG_TIDY_BIN})
        if ((NOT EXISTS ${_CLANG_TIDY_BIN}) OR IS_DIRECTORY ${_CLANG_TIDY_BIN})
            message (FATAL_ERROR "$ENV{CLIO_CLANG_TIDY_BIN}: no such file. Check the CLIO_CLANG_TIDY_BIN env variable")
        endif ()
        message (STATUS "Using clang-tidy from CLIO_CLANG_TIDY_BIN")
    else ()
        find_program (_CLANG_TIDY_BIN NAMES "clang-tidy-17" "clang-tidy" REQUIRED)
    endif ()

    if (NOT _CLANG_TIDY_BIN)
        message (FATAL_ERROR
            "clang-tidy binary not found. Please set the CLIO_CLANG_TIDY_BIN environment variable or install clang-tidy.")
    endif ()

    # Support for https://github.com/matus-chochlik/ctcache
    find_program (CLANG_TIDY_CACHE_PATH NAMES "clang-tidy-cache")
    if (CLANG_TIDY_CACHE_PATH)
        set (_CLANG_TIDY_CMD
            "${CLANG_TIDY_CACHE_PATH};${_CLANG_TIDY_BIN}"
            CACHE STRING "A combined command to run clang-tidy with caching wrapper")
    else ()
        set (_CLANG_TIDY_CMD "${_CLANG_TIDY_BIN}")
    endif ()

    set (CMAKE_CXX_CLANG_TIDY "${_CLANG_TIDY_CMD};--quiet")
    message (STATUS "Using clang-tidy: ${CMAKE_CXX_CLANG_TIDY}")
endif ()
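A hedged usage sketch (binary path illustrative):

``` bash
# Either rely on find_program locating clang-tidy-17/clang-tidy,
# or pin a specific binary via the env variable checked above:
export CLIO_CLANG_TIDY_BIN=/opt/llvm-17/bin/clang-tidy
cmake -B build -Dlint=TRUE .
```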
39 CMake/ClioVersion.cmake Normal file
@@ -0,0 +1,39 @@
#[===================================================================[
   write version to source
#]===================================================================]

find_package (Git REQUIRED)

set (GIT_COMMAND rev-parse --short HEAD)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
    OUTPUT_VARIABLE REV OUTPUT_STRIP_TRAILING_WHITESPACE)

set (GIT_COMMAND branch --show-current)
execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
    OUTPUT_VARIABLE BRANCH OUTPUT_STRIP_TRAILING_WHITESPACE)

if (BRANCH STREQUAL "")
    set (BRANCH "dev")
endif ()

if (NOT (BRANCH MATCHES master OR BRANCH MATCHES release/*)) # for develop and any other branch: YYYYMMDDHHMMSS-<branch>-<git-rev>
    execute_process (COMMAND date +%Y%m%d%H%M%S OUTPUT_VARIABLE DATE OUTPUT_STRIP_TRAILING_WHITESPACE)
    set (VERSION "${DATE}-${BRANCH}-${REV}")
else ()
    set (GIT_COMMAND describe --tags)
    execute_process (COMMAND ${GIT_EXECUTABLE} ${GIT_COMMAND}
        WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
        OUTPUT_VARIABLE TAG_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
    set (VERSION "${TAG_VERSION}-${REV}")
endif ()

if (CMAKE_BUILD_TYPE MATCHES Debug)
    set (VERSION "${VERSION}+DEBUG")
endif ()

message (STATUS "Build version: ${VERSION}")
set (clio_version "${VERSION}")

configure_file (CMake/Build.cpp.in ${CMAKE_SOURCE_DIR}/src/main/impl/Build.cpp)
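To make the branching above concrete, here are illustrative (invented) version strings this logic would produce:

``` bash
# develop or feature branch:  <date>-<branch>-<rev>   e.g. 20240115123045-my-feature-abc1234
# master or release/* branch: <git describe --tags>-<rev>   e.g. 2.0.0-abc1234
# Debug build of the latter:  2.0.0-abc1234+DEBUG
```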
440 CMake/CodeCoverage.cmake Normal file
@@ -0,0 +1,440 @@
# Copyright (c) 2012 - 2017, Lars Bilke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
#    may be used to endorse or promote products derived from this software without
#    specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# CHANGES:
#
# 2012-01-31, Lars Bilke
# - Enable Code Coverage
#
# 2013-09-17, Joakim Söderberg
# - Added support for Clang.
# - Some additional usage instructions.
#
# 2016-02-03, Lars Bilke
# - Refactored functions to use named parameters
#
# 2017-06-02, Lars Bilke
# - Merged with modified version from github.com/ufz/ogs
#
# 2019-05-06, Anatolii Kurotych
# - Remove unnecessary --coverage flag
#
# 2019-12-13, FeRD (Frank Dana)
# - Deprecate COVERAGE_LCOVR_EXCLUDES and COVERAGE_GCOVR_EXCLUDES lists in favor
#   of tool-agnostic COVERAGE_EXCLUDES variable, or EXCLUDE setup arguments.
# - CMake 3.4+: All excludes can be specified relative to BASE_DIRECTORY
# - All setup functions: accept BASE_DIRECTORY, EXCLUDE list
# - Set lcov basedir with -b argument
# - Add automatic --demangle-cpp in lcovr, if 'c++filt' is available (can be
#   overridden with NO_DEMANGLE option in setup_target_for_coverage_lcovr().)
# - Delete output dir, .info file on 'make clean'
# - Remove Python detection, since version mismatches will break gcovr
# - Minor cleanup (lowercase function names, update examples...)
#
# 2019-12-19, FeRD (Frank Dana)
# - Rename Lcov outputs, make filtered file canonical, fix cleanup for targets
#
# 2020-01-19, Bob Apthorpe
# - Added gfortran support
#
# 2020-02-17, FeRD (Frank Dana)
# - Make all add_custom_target()s VERBATIM to auto-escape wildcard characters
#   in EXCLUDEs, and remove manual escaping from gcovr targets
#
# 2021-01-19, Robin Mueller
# - Add CODE_COVERAGE_VERBOSE option which will allow printing the commands that are run
# - Added the option for users to set the GCOVR_ADDITIONAL_ARGS variable to supply additional
#   flags to the gcovr command
#
# 2020-05-04, Michael Davis
# - Add -fprofile-abs-path to make gcno files contain absolute paths
# - Fix BASE_DIRECTORY not working when defined
# - Change BYPRODUCT from folder to index.html to stop ninja from complaining about double defines
#
# 2021-05-10, Martin Stump
# - Check if the generator is multi-config before warning about non-Debug builds
#
# 2022-02-22, Marko Wehle
# - Change gcovr output from -o <filename> to --xml <filename> and --html <filename> output respectively.
#   This allows multiple output formats at the same time by making use of GCOVR_ADDITIONAL_ARGS, e.g. GCOVR_ADDITIONAL_ARGS "--txt".
#
# 2022-09-28, Sebastian Mueller
# - fix append_coverage_compiler_flags_to_target to correctly add flags
# - replace "-fprofile-arcs -ftest-coverage" with "--coverage" (equivalent)
#
# 2023-12-15, Bronek Kozicki
# - remove setup_target_for_coverage_lcov (slow) and setup_target_for_coverage_fastcov (no support for Clang)
# - fix Clang support by adding find_program( ... llvm-cov )
# - add Apple Clang support by adding execute_process( COMMAND xcrun -f llvm-cov ... )
# - add CODE_COVERAGE_GCOV_TOOL to explicitly select gcov tool and disable find_program
# - replace both functions setup_target_for_coverage_gcovr_* with single setup_target_for_coverage_gcovr
# - add support for all gcovr output formats
#
# USAGE:
#
# 1. Copy this file into your cmake modules path.
#
# 2. Add the following line to your CMakeLists.txt (best inside an if-condition
#    using a CMake option() to enable it just optionally):
#      include(CodeCoverage)
#
# 3. Append necessary compiler flags for all supported source files:
#      append_coverage_compiler_flags()
#    Or for specific target:
#      append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME)
#
# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og
#
# 4. If you need to exclude additional directories from the report, specify them
#    using full paths in the COVERAGE_EXCLUDES variable before calling
#    setup_target_for_coverage_*().
#    Example:
#      set(COVERAGE_EXCLUDES
#          '${PROJECT_SOURCE_DIR}/src/dir1/*'
#          '/path/to/my/src/dir2/*')
#    Or, use the EXCLUDE argument to setup_target_for_coverage_*().
#    Example:
#      setup_target_for_coverage_gcovr(
#          NAME coverage
#          EXECUTABLE testrunner
#          EXCLUDE "${PROJECT_SOURCE_DIR}/src/dir1/*" "/path/to/my/src/dir2/*")
#
# 4.a NOTE: With CMake 3.4+, COVERAGE_EXCLUDES or EXCLUDE can also be set
#     relative to the BASE_DIRECTORY (default: PROJECT_SOURCE_DIR)
#     Example:
#       set(COVERAGE_EXCLUDES "dir1/*")
#       setup_target_for_coverage_gcovr(
#           NAME coverage
#           EXECUTABLE testrunner
#           FORMAT html-details
#           BASE_DIRECTORY "${PROJECT_SOURCE_DIR}/src"
#           EXCLUDE "dir2/*")
#
# 4.b If you need to pass specific options to gcovr, specify them in
#     GCOVR_ADDITIONAL_ARGS variable.
#     Example:
#       set (GCOVR_ADDITIONAL_ARGS --exclude-throw-branches --exclude-noncode-lines -s)
#       setup_target_for_coverage_gcovr(
#           NAME coverage
#           EXECUTABLE testrunner
#           EXCLUDE "src/dir1" "src/dir2")
#
# 5. Use the functions described below to create a custom make target which
#    runs your test executable and produces a code coverage report.
#
# 6. Build a Debug build:
#      cmake -DCMAKE_BUILD_TYPE=Debug ..
#      make
#      make my_coverage_target

include(CMakeParseArguments)

option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)

# Check prereqs
find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)

if (DEFINED CODE_COVERAGE_GCOV_TOOL)
    set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
elseif (DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
    set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
    if (APPLE)
        execute_process( COMMAND xcrun -f llvm-cov
            OUTPUT_VARIABLE LLVMCOV_PATH
            OUTPUT_STRIP_TRAILING_WHITESPACE
        )
    else()
        find_program( LLVMCOV_PATH llvm-cov )
    endif()
    if(LLVMCOV_PATH)
        set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
    endif()
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
    find_program( GCOV_PATH gcov )
    set(GCOV_TOOL "${GCOV_PATH}")
endif()

# Check supported compiler (Clang, GNU and Flang)
get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
foreach(LANG ${LANGUAGES})
    if("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
        if("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
            message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
        endif()
    elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
           AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
        message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
    endif()
endforeach()

set(COVERAGE_COMPILER_FLAGS "-g --coverage"
    CACHE INTERNAL "")
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
    include(CheckCXXCompilerFlag)
    check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
    if(HAVE_cxx_fprofile_abs_path)
        set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
    endif()
    include(CheckCCompilerFlag)
    check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
    if(HAVE_c_fprofile_abs_path)
        set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
    endif()
endif()

set(CMAKE_Fortran_FLAGS_COVERAGE
    ${COVERAGE_COMPILER_FLAGS}
    CACHE STRING "Flags used by the Fortran compiler during coverage builds."
    FORCE )
set(CMAKE_CXX_FLAGS_COVERAGE
    ${COVERAGE_COMPILER_FLAGS}
    CACHE STRING "Flags used by the C++ compiler during coverage builds."
    FORCE )
set(CMAKE_C_FLAGS_COVERAGE
    ${COVERAGE_COMPILER_FLAGS}
    CACHE STRING "Flags used by the C compiler during coverage builds."
    FORCE )
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
    ""
    CACHE STRING "Flags used for linking binaries during coverage builds."
    FORCE )
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
    ""
    CACHE STRING "Flags used by the shared libraries linker during coverage builds."
    FORCE )
mark_as_advanced(
    CMAKE_Fortran_FLAGS_COVERAGE
    CMAKE_CXX_FLAGS_COVERAGE
    CMAKE_C_FLAGS_COVERAGE
    CMAKE_EXE_LINKER_FLAGS_COVERAGE
    CMAKE_SHARED_LINKER_FLAGS_COVERAGE )

get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
    message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)

if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
    link_libraries(gcov)
endif()

# Defines a target for running and collecting code coverage information.
# Builds dependencies, runs the given executable and outputs reports.
# NOTE! The executable should always have a ZERO as exit code otherwise
# the coverage generation will not complete.
#
# setup_target_for_coverage_gcovr(
#     NAME ctest_coverage                    # New target name
#     EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR
#     DEPENDENCIES executable_target         # Dependencies to build first
#     BASE_DIRECTORY "../"                   # Base directory for report
#                                            #  (defaults to PROJECT_SOURCE_DIR)
#     FORMAT "cobertura"                     # Output format, one of:
#                                            #  xml cobertura sonarqube json-summary
#                                            #  json-details coveralls csv txt
#                                            #  html-single html-nested html-details
#                                            #  (xml is an alias to cobertura;
#                                            #   if no format is set, defaults to xml)
#     EXCLUDE "src/dir1/*" "src/dir2/*"      # Patterns to exclude (can be relative
#                                            #  to BASE_DIRECTORY, with CMake 3.4+)
# )
# The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the
# gcovr command.
function(setup_target_for_coverage_gcovr)
    set(options NONE)
    set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
    set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
    cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

    if(NOT GCOV_TOOL)
        message(FATAL_ERROR "Could not find gcov or llvm-cov tool! Aborting...")
    endif()

    if(NOT GCOVR_PATH)
        message(FATAL_ERROR "Could not find gcovr tool! Aborting...")
    endif()

    # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
    if(DEFINED Coverage_BASE_DIRECTORY)
        get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
    else()
        set(BASEDIR ${PROJECT_SOURCE_DIR})
    endif()

    if(NOT DEFINED Coverage_FORMAT)
        set(Coverage_FORMAT xml)
    endif()

    if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
        message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
    else()
        if((Coverage_FORMAT STREQUAL "html-details")
           OR (Coverage_FORMAT STREQUAL "html-nested"))
            set(GCOVR_OUTPUT_FILE ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html)
            set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
        elseif(Coverage_FORMAT STREQUAL "html-single")
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
        elseif((Coverage_FORMAT STREQUAL "json-summary")
               OR (Coverage_FORMAT STREQUAL "json-details")
               OR (Coverage_FORMAT STREQUAL "coveralls"))
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
        elseif(Coverage_FORMAT STREQUAL "txt")
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
        elseif(Coverage_FORMAT STREQUAL "csv")
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
        else()
            set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
        endif()
    endif()

    if ((Coverage_FORMAT STREQUAL "cobertura")
        OR (Coverage_FORMAT STREQUAL "xml"))
        list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty )
        set(Coverage_FORMAT cobertura) # overwrite xml
    elseif(Coverage_FORMAT STREQUAL "sonarqube")
        list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}" )
    elseif(Coverage_FORMAT STREQUAL "json-summary")
        list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
    elseif(Coverage_FORMAT STREQUAL "json-details")
        list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
    elseif(Coverage_FORMAT STREQUAL "coveralls")
        list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
    elseif(Coverage_FORMAT STREQUAL "csv")
        list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}" )
    elseif(Coverage_FORMAT STREQUAL "txt")
        list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}" )
    elseif(Coverage_FORMAT STREQUAL "html-single")
        list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}" )
        list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
    elseif(Coverage_FORMAT STREQUAL "html-nested")
        list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}" )
    elseif(Coverage_FORMAT STREQUAL "html-details")
        list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}" )
    else()
        message(FATAL_ERROR "Unsupported output style ${Coverage_FORMAT}! Aborting...")
    endif()

    # Collect excludes (CMake 3.4+: Also compute absolute paths)
    set(GCOVR_EXCLUDES "")
    foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
        if(CMAKE_VERSION VERSION_GREATER 3.4)
            get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
        endif()
        list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
    endforeach()
    list(REMOVE_DUPLICATES GCOVR_EXCLUDES)

    # Combine excludes to several -e arguments
    set(GCOVR_EXCLUDE_ARGS "")
    foreach(EXCLUDE ${GCOVR_EXCLUDES})
        list(APPEND GCOVR_EXCLUDE_ARGS "-e")
        list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
    endforeach()

    # Set up commands which will be run to generate coverage data
    # Run tests
    set(GCOVR_EXEC_TESTS_CMD
        ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
    )

    # Create folder
    if(DEFINED GCOVR_CREATE_FOLDER)
        set(GCOVR_FOLDER_CMD
            ${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
    else()
        set(GCOVR_FOLDER_CMD echo) # dummy
    endif()

    # Running gcovr
    set(GCOVR_CMD
        ${GCOVR_PATH}
        --gcov-executable ${GCOV_TOOL}
        --gcov-ignore-parse-errors=negative_hits.warn_once_per_file
        -r ${BASEDIR}
        ${GCOVR_ADDITIONAL_ARGS}
        ${GCOVR_EXCLUDE_ARGS}
        --object-directory=${PROJECT_BINARY_DIR}
    )

    if(CODE_COVERAGE_VERBOSE)
        message(STATUS "Executed command report")

        message(STATUS "Command to run tests: ")
        string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
        message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")

        if(NOT GCOVR_FOLDER_CMD STREQUAL "echo")
            message(STATUS "Command to create a folder: ")
            string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
            message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
        endif()

        message(STATUS "Command to generate gcovr coverage data: ")
        string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
        message(STATUS "${GCOVR_CMD_SPACED}")
    endif()

    add_custom_target(${Coverage_NAME}
        COMMAND ${GCOVR_EXEC_TESTS_CMD}
        COMMAND ${GCOVR_FOLDER_CMD}
        COMMAND ${GCOVR_CMD}

        BYPRODUCTS ${GCOVR_OUTPUT_FILE}
        WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
        DEPENDS ${Coverage_DEPENDENCIES}
        VERBATIM # Protect arguments to commands
        COMMENT "Running gcovr to produce code coverage report."
    )

    # Show info where to find the report
    add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
        COMMAND ;
        COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
    )
endfunction() # setup_target_for_coverage_gcovr

function(append_coverage_compiler_flags)
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
    set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
    message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
endfunction() # append_coverage_compiler_flags

# Setup coverage for specific library
function(append_coverage_compiler_flags_to_target name)
    separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
    target_compile_options(${name} PRIVATE ${_flag_list})
    if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
        target_link_libraries(${name} PRIVATE gcov)
    endif()
endfunction()
11 CMake/Docs.cmake Normal file
@@ -0,0 +1,11 @@
find_package (Doxygen REQUIRED)

set (DOXYGEN_IN ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile)
set (DOXYGEN_OUT ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile)

configure_file (${DOXYGEN_IN} ${DOXYGEN_OUT} @ONLY)
add_custom_target (docs
    COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_OUT}
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMENT "Generating API documentation with Doxygen"
    VERBATIM)
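A usage sketch; `-Ddocs=TRUE` is the option defined in CMakeLists.txt that pulls this module in:

``` bash
# Configure with docs enabled, then build the `docs` target defined above
cmake -B build -Ddocs=TRUE .
cmake --build build --target docs
```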
53 CMake/Settings.cmake Normal file
@@ -0,0 +1,53 @@
set (COMPILER_FLAGS
    -Wall
    -Wcast-align
    -Wdouble-promotion
    -Wextra
    -Werror
    -Wformat=2
    -Wimplicit-fallthrough
    -Wmisleading-indentation
    -Wno-narrowing
    -Wno-deprecated-declarations
    -Wno-dangling-else
    -Wno-unused-but-set-variable
    -Wnon-virtual-dtor
    -Wnull-dereference
    -Wold-style-cast
    -pedantic
    -Wpedantic
    -Wunused
    # FIXME: The following bunch are needed for gcc12 atm.
    -Wno-missing-requires
    -Wno-restrict
    -Wno-null-dereference
    -Wno-maybe-uninitialized
    -Wno-unknown-warning-option # and this to work with clang
    # TODO: Address these and others in https://github.com/XRPLF/clio/issues/1273
)

# TODO: reenable when we change CI #884
# if (is_gcc AND NOT lint)
#     list (APPEND COMPILER_FLAGS
#         -Wduplicated-branches
#         -Wduplicated-cond
#         -Wlogical-op
#         -Wuseless-cast
#     )
# endif ()

if (is_clang)
    list (APPEND COMPILER_FLAGS
        -Wshadow # gcc is too aggressive with shadowing: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78147
    )
endif ()

if (is_appleclang)
    list (APPEND COMPILER_FLAGS
        -Wreorder-init-list
    )
endif ()

# See https://github.com/cpp-best-practices/cppbestpractices/blob/master/02-Use_the_Tools_Available.md#gcc--clang for the flags description

target_compile_options (clio PUBLIC ${COMPILER_FLAGS})
11 CMake/SourceLocation.cmake Normal file
@@ -0,0 +1,11 @@
include (CheckIncludeFileCXX)

check_include_file_cxx ("source_location" SOURCE_LOCATION_AVAILABLE)
if (SOURCE_LOCATION_AVAILABLE)
    target_compile_definitions (clio PUBLIC "HAS_SOURCE_LOCATION")
endif ()

check_include_file_cxx ("experimental/source_location" EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
if (EXPERIMENTAL_SOURCE_LOCATION_AVAILABLE)
    target_compile_definitions (clio PUBLIC "HAS_EXPERIMENTAL_SOURCE_LOCATION")
endif ()
CMake/deps/Boost.cmake
@@ -1,6 +1,11 @@
set(Boost_USE_STATIC_LIBS ON)
set(Boost_USE_STATIC_RUNTIME ON)
set (Boost_USE_STATIC_LIBS ON)
set (Boost_USE_STATIC_RUNTIME ON)

find_package(Boost 1.75 COMPONENTS filesystem log_setup log thread system REQUIRED)

target_link_libraries(clio PUBLIC ${Boost_LIBRARIES})
find_package (Boost 1.82 REQUIRED
    COMPONENTS
        program_options
        coroutine
        system
        log
        log_setup
)
5 CMake/deps/OpenSSL.cmake Normal file
@@ -0,0 +1,5 @@
find_package (OpenSSL 1.1.1 REQUIRED)

set_target_properties (OpenSSL::SSL PROPERTIES
    INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)
CMake/deps/Postgres.cmake
@@ -1,30 +0,0 @@
set(POSTGRES_INSTALL_DIR ${CMAKE_BINARY_DIR}/postgres)
set(POSTGRES_LIBS pq pgcommon pgport)
ExternalProject_Add(postgres
    GIT_REPOSITORY https://github.com/postgres/postgres.git
    GIT_TAG REL_14_1
    GIT_SHALLOW 1
    LOG_CONFIGURE 1
    LOG_BUILD 1
    CONFIGURE_COMMAND ./configure --prefix ${POSTGRES_INSTALL_DIR} --without-readline --verbose
    BUILD_COMMAND ${CMAKE_COMMAND} -E env --unset=MAKELEVEL make VERBOSE=${CMAKE_VERBOSE_MAKEFILE} -j32
    BUILD_IN_SOURCE 1
    INSTALL_COMMAND ${CMAKE_COMMAND} -E env make -s --no-print-directory install
    UPDATE_COMMAND ""
    BUILD_BYPRODUCTS
        ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}pq${CMAKE_STATIC_LIBRARY_SUFFIX}}
        ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}pgcommon${CMAKE_STATIC_LIBRARY_SUFFIX}}
        ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}pgport${CMAKE_STATIC_LIBRARY_SUFFIX}}
)

foreach(_lib ${POSTGRES_LIBS})
    add_library(${_lib} STATIC IMPORTED GLOBAL)
    add_dependencies(${_lib} postgres)
    set_target_properties(${_lib} PROPERTIES
        IMPORTED_LOCATION ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${_lib}.a)
    set_target_properties(${_lib} PROPERTIES
        INTERFACE_INCLUDE_DIRECTORIES ${POSTGRES_INSTALL_DIR}/include)
    target_link_libraries(clio PUBLIC ${POSTGRES_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}${_lib}${CMAKE_STATIC_LIBRARY_SUFFIX})
endforeach()

target_include_directories(clio PUBLIC ${POSTGRES_INSTALL_DIR}/include)
2 CMake/deps/Threads.cmake Normal file
@@ -0,0 +1,2 @@
set (THREADS_PREFER_PTHREAD_FLAG ON)
find_package (Threads)
CMake/deps/cassandra.cmake
@@ -1,151 +1 @@
find_package(ZLIB REQUIRED)

find_library(cassandra NAMES cassandra)
if(NOT cassandra)
    message("System installed Cassandra cpp driver not found. Will build")
    find_library(zlib NAMES zlib1g-dev zlib-devel zlib z)
    if(NOT zlib)
        message("zlib not found. will build")
        add_library(zlib STATIC IMPORTED GLOBAL)
        ExternalProject_Add(zlib_src
            PREFIX ${nih_cache_path}
            GIT_REPOSITORY https://github.com/madler/zlib.git
            GIT_TAG master
            INSTALL_COMMAND ""
            BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
        )
        ExternalProject_Get_Property (zlib_src SOURCE_DIR)
        ExternalProject_Get_Property (zlib_src BINARY_DIR)
        set (zlib_src_SOURCE_DIR "${SOURCE_DIR}")
        file (MAKE_DIRECTORY ${zlib_src_SOURCE_DIR}/include)
        set_target_properties (zlib PROPERTIES
            IMPORTED_LOCATION
                ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}z.a
            INTERFACE_INCLUDE_DIRECTORIES
                ${SOURCE_DIR}/include)
        add_dependencies(zlib zlib_src)
        file(TO_CMAKE_PATH "${zlib_src_SOURCE_DIR}" zlib_src_SOURCE_DIR)
    endif()
    find_library(krb5 NAMES krb5-dev libkrb5-dev)
    if(NOT krb5)
        message("krb5 not found. will build")
        add_library(krb5 STATIC IMPORTED GLOBAL)
        ExternalProject_Add(krb5_src
            PREFIX ${nih_cache_path}
            GIT_REPOSITORY https://github.com/krb5/krb5.git
            GIT_TAG master
            UPDATE_COMMAND ""
            CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared
            BUILD_IN_SOURCE 1
            BUILD_COMMAND make
            INSTALL_COMMAND ""
            BUILD_BYPRODUCTS <SOURCE_DIR>/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
        )
        message(${ep_lib_prefix}/krb5.a)
        message(${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a)
        ExternalProject_Get_Property (krb5_src SOURCE_DIR)
        ExternalProject_Get_Property (krb5_src BINARY_DIR)
        set (krb5_src_SOURCE_DIR "${SOURCE_DIR}")
        file (MAKE_DIRECTORY ${krb5_src_SOURCE_DIR}/include)
        set_target_properties (krb5 PROPERTIES
            IMPORTED_LOCATION
                ${SOURCE_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}krb5.a
            INTERFACE_INCLUDE_DIRECTORIES
                ${SOURCE_DIR}/include)
        add_dependencies(krb5 krb5_src)
        file(TO_CMAKE_PATH "${krb5_src_SOURCE_DIR}" krb5_src_SOURCE_DIR)
    endif()

    find_library(libuv1 NAMES uv1 libuv1 liubuv1-dev libuv1:amd64)

    if(NOT libuv1)
        message("libuv1 not found, will build")
        add_library(libuv1 STATIC IMPORTED GLOBAL)
        ExternalProject_Add(libuv_src
            PREFIX ${nih_cache_path}
            GIT_REPOSITORY https://github.com/libuv/libuv.git
            GIT_TAG v1.x
            INSTALL_COMMAND ""
            BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
        )

        ExternalProject_Get_Property (libuv_src SOURCE_DIR)
        ExternalProject_Get_Property (libuv_src BINARY_DIR)
        set (libuv_src_SOURCE_DIR "${SOURCE_DIR}")
        file (MAKE_DIRECTORY ${libuv_src_SOURCE_DIR}/include)

        set_target_properties (libuv1 PROPERTIES
            IMPORTED_LOCATION
                ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}uv_a.a
            INTERFACE_INCLUDE_DIRECTORIES
                ${SOURCE_DIR}/include)
        add_dependencies(libuv1 libuv_src)

        file(TO_CMAKE_PATH "${libuv_src_SOURCE_DIR}" libuv_src_SOURCE_DIR)
    endif()
    add_library (cassandra STATIC IMPORTED GLOBAL)
    ExternalProject_Add(cassandra_src
        PREFIX ${nih_cache_path}
        GIT_REPOSITORY https://github.com/datastax/cpp-driver.git
        GIT_TAG master
        CMAKE_ARGS
            -DLIBUV_ROOT_DIR=${BINARY_DIR}
            -DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include
            -DCASS_BUILD_STATIC=ON
            -DCASS_BUILD_SHARED=OFF
        INSTALL_COMMAND ""
        BUILD_BYPRODUCTS <BINARY_DIR>/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
    )

    ExternalProject_Get_Property (cassandra_src SOURCE_DIR)
    ExternalProject_Get_Property (cassandra_src BINARY_DIR)
    set (cassandra_src_SOURCE_DIR "${SOURCE_DIR}")
    file (MAKE_DIRECTORY ${cassandra_src_SOURCE_DIR}/include)

    set_target_properties (cassandra PROPERTIES
        IMPORTED_LOCATION
            ${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a
        INTERFACE_INCLUDE_DIRECTORIES
            ${SOURCE_DIR}/include)
    message("cass dirs")
    message(${BINARY_DIR})
    message(${SOURCE_DIR})
    message(${BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}cassandra_static.a)
    add_dependencies(cassandra cassandra_src)

    if(NOT libuv1)
        ExternalProject_Add_StepDependencies(cassandra_src build libuv1)
        target_link_libraries(cassandra INTERFACE libuv1)
    else()
        target_link_libraries(cassandra INTERFACE ${libuv1})
    endif()
    if(NOT krb5)
        ExternalProject_Add_StepDependencies(cassandra_src build krb5)
        target_link_libraries(cassandra INTERFACE krb5)
    else()
        target_link_libraries(cassandra INTERFACE ${krb5})
    endif()

    if(NOT zlib)
        ExternalProject_Add_StepDependencies(cassandra_src build zlib)
        target_link_libraries(cassandra INTERFACE zlib)
    else()
        target_link_libraries(cassandra INTERFACE ${zlib})
    endif()
    set(OPENSSL_USE_STATIC_LIBS TRUE)
    find_package(OpenSSL REQUIRED)
    target_link_libraries(cassandra INTERFACE OpenSSL::SSL)

    file(TO_CMAKE_PATH "${cassandra_src_SOURCE_DIR}" cassandra_src_SOURCE_DIR)
    target_link_libraries(clio PUBLIC cassandra)
else()
    message("Found system installed cassandra cpp driver")
    message(${cassandra})

    find_path(cassandra_includes NAMES cassandra.h REQUIRED)
    target_link_libraries (clio PUBLIC ${cassandra})
    target_include_directories(clio INTERFACE ${cassandra_includes})
endif()
find_package (cassandra-cpp-driver REQUIRED)
CMake/deps/gtest.cmake
@@ -1,19 +1,4 @@
FetchContent_Declare(
    googletest
    URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip
)
find_package (GTest REQUIRED)

FetchContent_GetProperties(googletest)

if(NOT googletest_POPULATED)
    FetchContent_Populate(googletest)
    add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_link_libraries(clio_tests PUBLIC clio gtest_main)

enable_testing()

include(GoogleTest)

gtest_discover_tests(clio_tests)
enable_testing ()
include (GoogleTest)
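Since `gtest_discover_tests` (invoked from CMakeLists.txt) registers each test case with CTest, a typical run looks like this sketch:

``` bash
cmake -B build -Dtests=TRUE .
cmake --build build
cd build && ctest --output-on-failure   # or run ./clio_tests directly
```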
3 CMake/deps/libbacktrace.cmake Normal file
@@ -0,0 +1,3 @@
target_compile_definitions(clio PUBLIC BOOST_STACKTRACE_LINK)
target_compile_definitions(clio PUBLIC BOOST_STACKTRACE_USE_BACKTRACE)
find_package(libbacktrace REQUIRED)
1 CMake/deps/libfmt.cmake Normal file
@@ -0,0 +1 @@
find_package (fmt REQUIRED)
1 CMake/deps/libxrpl.cmake Normal file
@@ -0,0 +1 @@
find_package (xrpl REQUIRED)
CMake/deps/rippled.cmake
@@ -1,18 +0,0 @@
set(RIPPLED_REPO "https://github.com/ripple/rippled.git")
set(RIPPLED_BRANCH "develop")
set(NIH_CACHE_ROOT "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "")
message(STATUS "Cloning ${RIPPLED_REPO} branch ${RIPPLED_BRANCH}")
FetchContent_Declare(rippled
    GIT_REPOSITORY "${RIPPLED_REPO}"
    GIT_TAG "${RIPPLED_BRANCH}"
    GIT_SHALLOW ON
)

FetchContent_GetProperties(rippled)
if(NOT rippled_POPULATED)
    FetchContent_Populate(rippled)
    add_subdirectory(${rippled_SOURCE_DIR} ${rippled_BINARY_DIR} EXCLUDE_FROM_ALL)
endif()

target_link_libraries(clio PUBLIC xrpl_core grpc_pbufs)
target_include_directories(clio PUBLIC ${rippled_SOURCE_DIR}/src ) # TODO: Seems like this shouldn't be needed?
CMake/install/PostInstall.cmake
@@ -1,4 +0,0 @@
execute_process(
    COMMAND ln -s ${CMAKE_INSTALL_PREFIX}/${CLIO_INSTALL_DIR}/bin/clio_server /usr/local/bin/clio_server
    COMMAND_ECHO STDOUT
)
17 CMake/install/clio.service.in Normal file
@@ -0,0 +1,17 @@
[Unit]
Description=Clio XRPL API server
Documentation=https://github.com/XRPLF/clio.git

After=network-online.target
Wants=network-online.target

[Service]
Type=simple
ExecStart=@CLIO_INSTALL_DIR@/bin/clio_server @CLIO_INSTALL_DIR@/etc/config.json
Restart=on-failure
User=clio
Group=clio
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
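A hedged sketch of enabling the installed unit; it assumes the `clio` user and group referenced above already exist (the snippets here do not show them being created):

``` bash
# After installation places clio.service under /lib/systemd/system:
sudo systemctl daemon-reload
sudo systemctl enable --now clio
sudo systemctl status clio
```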
CMake/install/install.cmake
@@ -1,7 +1,14 @@
set(CLIO_INSTALL_DIR /opt/clio)
set(CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})
set (CLIO_INSTALL_DIR "/opt/clio")
set (CMAKE_INSTALL_PREFIX ${CLIO_INSTALL_DIR})

install (TARGETS clio_server DESTINATION bin)

file (READ example-config.json config)
string (REGEX REPLACE "./clio_log" "/var/log/clio/" config "${config}")
file (WRITE ${CMAKE_BINARY_DIR}/install-config.json "${config}")
install (FILES ${CMAKE_BINARY_DIR}/install-config.json DESTINATION etc RENAME config.json)

configure_file ("${CMAKE_SOURCE_DIR}/CMake/install/clio.service.in" "${CMAKE_BINARY_DIR}/clio.service")

install (FILES "${CMAKE_BINARY_DIR}/clio.service" DESTINATION /lib/systemd/system)

install(TARGETS clio_server DESTINATION bin)
# install(TARGETS clio_tests DESTINATION bin) # NOTE: Do we want to install the tests?
install(FILES example-config.json DESTINATION etc/clio)
install(SCRIPT "${CMAKE_SOURCE_DIR}/CMake/install/PostInstall.cmake")
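Putting the install rules together, a representative sequence (sketch; the prefix is forced to /opt/clio by the file above):

``` bash
cmake -B build .
cmake --build build
sudo cmake --install build   # installs clio_server, config.json, and clio.service
```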
CMake/settings.cmake
@@ -1 +0,0 @@
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -Wall -Werror -Wno-dangling-else")
CMake/version-config.h
@@ -1 +0,0 @@
#define VERSION "@PROJECT_VERSION@"
387 CMakeLists.txt
@@ -1,83 +1,360 @@
cmake_minimum_required(VERSION 3.16.3)
project(clio)

project(clio VERSION 0.1.1)
# ========================================================================== #
#                                  Options                                   #
# ========================================================================== #
option (verbose "Verbose build" FALSE)
option (tests "Build tests" FALSE)
option (docs "Generate doxygen docs" FALSE)
option (coverage "Build test coverage report" FALSE)
option (packaging "Create distribution packages" FALSE)
option (lint "Run clang-tidy checks during compilation" FALSE)
# ========================================================================== #
set (san "" CACHE STRING "Add sanitizer instrumentation")
set (CMAKE_EXPORT_COMPILE_COMMANDS TRUE)
set_property (CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
# ========================================================================== #

option(BUILD_TESTS "Build tests" TRUE)
# Include required modules
include (CMake/Ccache.cmake)
include (CheckCXXCompilerFlag)
include (CMake/ClangTidy.cmake)

option(VERBOSE "Verbose build" TRUE)
if(VERBOSE)
    set(CMAKE_VERBOSE_MAKEFILE TRUE)
    set(FETCHCONTENT_QUIET FALSE CACHE STRING "Verbose FetchContent()")
# Set coverage build options
if (tests AND coverage)
    include (CMake/CodeCoverage.cmake)
    append_coverage_compiler_flags()
endif()

add_library(clio)
target_compile_features(clio PUBLIC cxx_std_20)
target_include_directories(clio PUBLIC src)
if (verbose)
    set (CMAKE_VERBOSE_MAKEFILE TRUE)
endif ()

include(FetchContent)
include(ExternalProject)
include(CMake/settings.cmake)
include(CMake/deps/rippled.cmake)
include(CMake/deps/Boost.cmake)
include(CMake/deps/cassandra.cmake)
include(CMake/deps/Postgres.cmake)
if (packaging)
    add_definitions (-DPKG=1)
endif ()

# configure_file(CMake/version-config.h include/version.h) # NOTE: Not used, but an idea how to handle versioning.
add_library (clio)

target_sources(clio PRIVATE
# Clio tweaks and checks
include (CMake/CheckCompiler.cmake)
include (CMake/Settings.cmake)
include (CMake/ClioVersion.cmake)
include (CMake/SourceLocation.cmake)

# Clio deps
include (CMake/deps/libxrpl.cmake)
include (CMake/deps/Boost.cmake)
include (CMake/deps/OpenSSL.cmake)
include (CMake/deps/Threads.cmake)
include (CMake/deps/libfmt.cmake)
include (CMake/deps/cassandra.cmake)
include (CMake/deps/libbacktrace.cmake)

# TODO: Include directory will be wrong when installed.
target_include_directories (clio PUBLIC src)
target_compile_features (clio PUBLIC cxx_std_20)

target_link_libraries (clio
    PUBLIC Boost::boost
    PUBLIC Boost::coroutine
    PUBLIC Boost::program_options
    PUBLIC Boost::system
    PUBLIC Boost::log
    PUBLIC Boost::log_setup
    PUBLIC Boost::stacktrace_backtrace
    PUBLIC cassandra-cpp-driver::cassandra-cpp-driver
    PUBLIC fmt::fmt
    PUBLIC OpenSSL::Crypto
    PUBLIC OpenSSL::SSL
    PUBLIC xrpl::libxrpl
    PUBLIC dl
    PUBLIC libbacktrace::libbacktrace

    INTERFACE Threads::Threads
)

if (is_gcc)
    # FIXME: needed on gcc for now
    target_compile_definitions (clio PUBLIC BOOST_ASIO_DISABLE_CONCEPTS)
endif ()

target_sources (clio PRIVATE
    ## Main
    src/main/impl/Build.cpp
    ## Backend
    src/backend/BackendInterface.cpp
    src/backend/CassandraBackend.cpp
    src/backend/LayeredCache.cpp
    src/backend/Pg.cpp
    src/backend/PostgresBackend.cpp
    src/backend/SimpleCache.cpp
    src/data/BackendCounters.cpp
    src/data/BackendInterface.cpp
    src/data/LedgerCache.cpp
    src/data/cassandra/impl/Future.cpp
    src/data/cassandra/impl/Cluster.cpp
    src/data/cassandra/impl/Batch.cpp
    src/data/cassandra/impl/Result.cpp
    src/data/cassandra/impl/Tuple.cpp
    src/data/cassandra/impl/SslContext.cpp
    src/data/cassandra/Handle.cpp
    src/data/cassandra/SettingsProvider.cpp
    ## ETL
    src/etl/ETLSource.cpp
    src/etl/ReportingETL.cpp
    ## Subscriptions
    src/subscriptions/SubscriptionManager.cpp
    src/etl/Source.cpp
    src/etl/ProbingSource.cpp
    src/etl/NFTHelpers.cpp
    src/etl/ETLService.cpp
    src/etl/ETLState.cpp
    src/etl/LoadBalancer.cpp
    src/etl/impl/ForwardCache.cpp
    ## Feed
    src/feed/SubscriptionManager.cpp
    src/feed/impl/TransactionFeed.cpp
    src/feed/impl/LedgerFeed.cpp
    src/feed/impl/ProposedTransactionFeed.cpp
    src/feed/impl/SingleFeedBase.cpp
    ## Web
    src/web/impl/AdminVerificationStrategy.cpp
    src/web/IntervalSweepHandler.cpp
    src/web/Resolver.cpp
    ## RPC
    src/rpc/RPC.cpp
    src/rpc/Errors.cpp
    src/rpc/Factories.cpp
    src/rpc/AMMHelpers.cpp
    src/rpc/RPCHelpers.cpp
    src/rpc/Counters.cpp
    ## RPC Methods
    # Account
    src/rpc/WorkQueue.cpp
    src/rpc/common/Specs.cpp
    src/rpc/common/Validators.cpp
    src/rpc/common/MetaProcessors.cpp
    src/rpc/common/impl/APIVersionParser.cpp
    src/rpc/common/impl/HandlerProvider.cpp
    ## RPC handlers
    src/rpc/handlers/AccountChannels.cpp
    src/rpc/handlers/AccountCurrencies.cpp
    src/rpc/handlers/AccountInfo.cpp
    src/rpc/handlers/AccountLines.cpp
    src/rpc/handlers/AccountOffers.cpp
    src/rpc/handlers/AccountNFTs.cpp
    src/rpc/handlers/AccountObjects.cpp
    src/rpc/handlers/AccountOffers.cpp
    src/rpc/handlers/AccountTx.cpp
    src/rpc/handlers/AMMInfo.cpp
    src/rpc/handlers/BookChanges.cpp
    src/rpc/handlers/BookOffers.cpp
    src/rpc/handlers/DepositAuthorized.cpp
    src/rpc/handlers/GatewayBalances.cpp
    src/rpc/handlers/NoRippleCheck.cpp
    # Ledger
    src/rpc/handlers/Ledger.cpp
    src/rpc/handlers/LedgerData.cpp
    src/rpc/handlers/LedgerEntry.cpp
    src/rpc/handlers/LedgerRange.cpp
    # Transaction
    src/rpc/handlers/Tx.cpp
    src/rpc/handlers/NFTsByIssuer.cpp
    src/rpc/handlers/NFTBuyOffers.cpp
    src/rpc/handlers/NFTHistory.cpp
    src/rpc/handlers/NFTInfo.cpp
    src/rpc/handlers/NFTOffersCommon.cpp
    src/rpc/handlers/NFTSellOffers.cpp
    src/rpc/handlers/NoRippleCheck.cpp
    src/rpc/handlers/Random.cpp
    src/rpc/handlers/TransactionEntry.cpp
    src/rpc/handlers/AccountTx.cpp
    # Dex
    src/rpc/handlers/BookOffers.cpp
    # Payment Channel
    src/rpc/handlers/ChannelAuthorize.cpp
    src/rpc/handlers/ChannelVerify.cpp
    # Subscribe
    src/rpc/handlers/Subscribe.cpp
    # Server
    src/rpc/handlers/ServerInfo.cpp
    # Utility
    src/rpc/handlers/Random.cpp)
    ## Util
    src/util/config/Config.cpp
    src/util/log/Logger.cpp
    src/util/prometheus/Http.cpp
    src/util/prometheus/Label.cpp
    src/util/prometheus/MetricBase.cpp
    src/util/prometheus/MetricBuilder.cpp
    src/util/prometheus/MetricsFamily.cpp
    src/util/prometheus/OStream.cpp
    src/util/prometheus/Prometheus.cpp
    src/util/Random.cpp
    src/util/requests/RequestBuilder.cpp
    src/util/requests/Types.cpp
    src/util/requests/WsConnection.cpp
    src/util/requests/impl/SslContext.cpp
    src/util/Taggable.cpp
    src/util/TerminationHandler.cpp
    src/util/TxUtils.cpp
    src/util/LedgerUtils.cpp
)

add_executable(clio_server src/main.cpp)
target_link_libraries(clio_server PUBLIC clio)
# Clio server
add_executable (clio_server src/main/Main.cpp)
target_link_libraries (clio_server PRIVATE clio)
target_link_options(clio_server
    PRIVATE
    $<$<AND:$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${san}>>>:-static-libstdc++ -static-libgcc>
)

if(BUILD_TESTS)
    add_executable(clio_tests unittests/main.cpp)
    include(CMake/deps/gtest.cmake)
endif()
# Unittesting
if (tests)
    set (TEST_TARGET clio_tests)
    add_executable (${TEST_TARGET}
        # Common
        unittests/Main.cpp
        unittests/Playground.cpp
        unittests/LoggerTests.cpp
        unittests/ConfigTests.cpp
        unittests/ProfilerTests.cpp
        unittests/JsonUtilTests.cpp
        unittests/DOSGuardTests.cpp
        unittests/util/TestGlobals.cpp
        unittests/util/AssertTests.cpp
        unittests/util/BatchingTests.cpp
        unittests/util/TestHttpServer.cpp
        unittests/util/TestObject.cpp
        unittests/util/TestWsServer.cpp
        unittests/util/TxUtilTests.cpp
        unittests/util/StringUtils.cpp
        unittests/util/LedgerUtilsTests.cpp
        unittests/util/prometheus/CounterTests.cpp
        unittests/util/prometheus/GaugeTests.cpp
        unittests/util/prometheus/HistogramTests.cpp
        unittests/util/prometheus/HttpTests.cpp
        unittests/util/prometheus/LabelTests.cpp
        unittests/util/prometheus/MetricBuilderTests.cpp
        unittests/util/prometheus/MetricsFamilyTests.cpp
        unittests/util/prometheus/OStreamTests.cpp
        unittests/util/requests/RequestBuilderTests.cpp
        unittests/util/requests/SslContextTests.cpp
        unittests/util/requests/WsConnectionTests.cpp
        # ETL
        unittests/etl/ExtractionDataPipeTests.cpp
        unittests/etl/ExtractorTests.cpp
        unittests/etl/TransformerTests.cpp
        unittests/etl/CacheLoaderTests.cpp
        unittests/etl/AmendmentBlockHandlerTests.cpp
        unittests/etl/LedgerPublisherTests.cpp
        unittests/etl/ETLStateTests.cpp
        # RPC
        unittests/rpc/ErrorTests.cpp
        unittests/rpc/BaseTests.cpp
        unittests/rpc/RPCHelpersTests.cpp
        unittests/rpc/CountersTests.cpp
        unittests/rpc/APIVersionTests.cpp
        unittests/rpc/ForwardingProxyTests.cpp
        unittests/rpc/WorkQueueTests.cpp
        unittests/rpc/AmendmentsTests.cpp
        unittests/rpc/JsonBoolTests.cpp
        ## RPC handlers
        unittests/rpc/handlers/DefaultProcessorTests.cpp
        unittests/rpc/handlers/TestHandlerTests.cpp
        unittests/rpc/handlers/AccountCurrenciesTests.cpp
        unittests/rpc/handlers/AccountLinesTests.cpp
        unittests/rpc/handlers/AccountTxTests.cpp
        unittests/rpc/handlers/AccountOffersTests.cpp
        unittests/rpc/handlers/AccountInfoTests.cpp
        unittests/rpc/handlers/AccountChannelsTests.cpp
        unittests/rpc/handlers/AccountNFTsTests.cpp
        unittests/rpc/handlers/BookOffersTests.cpp
        unittests/rpc/handlers/DepositAuthorizedTests.cpp
        unittests/rpc/handlers/GatewayBalancesTests.cpp
        unittests/rpc/handlers/TxTests.cpp
        unittests/rpc/handlers/TransactionEntryTests.cpp
        unittests/rpc/handlers/LedgerEntryTests.cpp
        unittests/rpc/handlers/LedgerRangeTests.cpp
        unittests/rpc/handlers/NoRippleCheckTests.cpp
        unittests/rpc/handlers/ServerInfoTests.cpp
        unittests/rpc/handlers/PingTests.cpp
        unittests/rpc/handlers/RandomTests.cpp
        unittests/rpc/handlers/NFTInfoTests.cpp
        unittests/rpc/handlers/NFTBuyOffersTests.cpp
        unittests/rpc/handlers/NFTsByIssuerTest.cpp
        unittests/rpc/handlers/NFTSellOffersTests.cpp
        unittests/rpc/handlers/NFTHistoryTests.cpp
        unittests/rpc/handlers/SubscribeTests.cpp
        unittests/rpc/handlers/UnsubscribeTests.cpp
        unittests/rpc/handlers/LedgerDataTests.cpp
        unittests/rpc/handlers/AccountObjectsTests.cpp
        unittests/rpc/handlers/BookChangesTests.cpp
        unittests/rpc/handlers/LedgerTests.cpp
        unittests/rpc/handlers/VersionHandlerTests.cpp
        unittests/rpc/handlers/AMMInfoTests.cpp
        # Backend
        unittests/data/BackendFactoryTests.cpp
        unittests/data/BackendInterfaceTests.cpp
        unittests/data/BackendCountersTests.cpp
        unittests/data/cassandra/BaseTests.cpp
        unittests/data/cassandra/BackendTests.cpp
        unittests/data/cassandra/RetryPolicyTests.cpp
        unittests/data/cassandra/SettingsProviderTests.cpp
        unittests/data/cassandra/ExecutionStrategyTests.cpp
        unittests/data/cassandra/AsyncExecutorTests.cpp
        # Webserver
        unittests/web/AdminVerificationTests.cpp
        unittests/web/ServerTests.cpp
        unittests/web/RPCServerHandlerTests.cpp
        unittests/web/WhitelistHandlerTests.cpp
        unittests/web/SweepHandlerTests.cpp
        # Feed
        unittests/feed/SubscriptionManagerTests.cpp
        unittests/feed/SingleFeedBaseTests.cpp
        unittests/feed/ProposedTransactionFeedTests.cpp
        unittests/feed/BookChangesFeedTests.cpp
        unittests/feed/LedgerFeedTests.cpp
        unittests/feed/TransactionFeedTests.cpp
        unittests/feed/ForwardFeedTests.cpp
        unittests/feed/TrackableSignalTests.cpp)

include(CMake/install/install.cmake)
    include (CMake/deps/gtest.cmake)

    # See https://github.com/google/googletest/issues/3475
    gtest_discover_tests (clio_tests DISCOVERY_TIMEOUT 10)

    # Fix for dwarf5 bug on ci
    target_compile_options (clio PUBLIC -gdwarf-4)

    target_compile_definitions (${TEST_TARGET} PUBLIC UNITTEST_BUILD)
    target_include_directories (${TEST_TARGET} PRIVATE unittests)
    target_link_libraries (${TEST_TARGET} PUBLIC clio gtest::gtest)

    # Generate `coverage_report` target if coverage is enabled
    if (coverage)
        if (DEFINED CODE_COVERAGE_REPORT_FORMAT)
            set (CODE_COVERAGE_FORMAT ${CODE_COVERAGE_REPORT_FORMAT})
        else ()
            set (CODE_COVERAGE_FORMAT html-details)
        endif ()

        if (DEFINED CODE_COVERAGE_TESTS_ARGS)
            set (TESTS_ADDITIONAL_ARGS ${CODE_COVERAGE_TESTS_ARGS})
            separate_arguments (TESTS_ADDITIONAL_ARGS)
        else ()
            set (TESTS_ADDITIONAL_ARGS "")
        endif ()

        set (GCOVR_ADDITIONAL_ARGS --exclude-throw-branches -s)

        setup_target_for_coverage_gcovr (
            NAME coverage_report
            FORMAT ${CODE_COVERAGE_FORMAT}
            EXECUTABLE clio_tests
            EXECUTABLE_ARGS --gtest_brief=1 ${TESTS_ADDITIONAL_ARGS}
            EXCLUDE "unittests"
            DEPENDENCIES clio_tests
        )
    endif ()
endif ()

# Enable selected sanitizer if enabled via `san`
if (san)
    target_compile_options (clio
        PUBLIC
        # Sanitizers recommend minimum of -O1 for reasonable performance
        $<$<CONFIG:Debug>:-O1>
        ${SAN_FLAG}
        -fno-omit-frame-pointer)
    target_compile_definitions (clio
        PUBLIC
        $<$<STREQUAL:${san},address>:SANITIZER=ASAN>
        $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
        $<$<STREQUAL:${san},memory>:SANITIZER=MSAN>
        $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>)
    target_link_libraries (clio INTERFACE ${SAN_FLAG} ${SAN_LIB})
endif ()

# Generate `docs` target for doxygen documentation if enabled
# Note: use `make docs` to generate the documentation
if (docs)
    include (CMake/Docs.cmake)
endif ()

include (CMake/install/install.cmake)
if (packaging)
    include (CMake/packaging.cmake) # This file exists only in build runner
endif ()
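Taken together, a representative configure/build flow using the options defined above (values illustrative):

``` bash
# Plain release build of the server
cmake -B build -DCMAKE_BUILD_TYPE=Release .
cmake --build build --parallel

# Debug build with tests and an HTML coverage report (default format: html-details)
cmake -B build-cov -DCMAKE_BUILD_TYPE=Debug -Dtests=TRUE -Dcoverage=TRUE .
cmake --build build-cov --target coverage_report
```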
140
CONTRIBUTING.md
Normal file
140
CONTRIBUTING.md
Normal file
@@ -0,0 +1,140 @@
|
||||
# Contributing
Thank you for your interest in contributing to the `clio` project 🙏

To contribute, please:
1. Fork the repository under your own user.
2. Create a new branch on which to commit/push your changes.
3. Write and test your code.
4. Ensure that your code compiles with the provided build system, and update the build system as part of your PR where needed and appropriate.
5. Where applicable, write test cases for your code and include those in `unittests`.
6. Ensure your code passes automated checks (e.g. clang-format).
7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change). See below for more details.
8. Open a PR to the main repository onto the _develop_ branch, and follow the provided template.

> **Note:** Please read the [Style guide](#style-guide).

## Install git hooks
Please run the following command in order to use the git hooks that are helpful for `clio` development.

``` bash
git config --local core.hooksPath .githooks
```

## Git commands
This section offers a detailed look at the git commands you will need to use to get your PR submitted.
Please note that there is more than one way to do this; these commands are provided for your convenience.
At this point it's assumed that you have already finished working on your feature/bug.

> **Important:** Before you issue any of the commands below, please hit the `Sync fork` button and make sure your fork's `develop` branch is up-to-date with the main `clio` repository.

``` bash
# Create a backup of your branch
git branch <your feature branch>_bk

# Rebase and squash commits into one
git checkout develop
git pull origin develop
git checkout <your feature branch>
git rebase -i develop
```
For each commit in the list other than the first one, enter `s` to squash.
After this is done, you will have the opportunity to write a message for the squashed commit.

> **Hint:** Please use **imperative mood** in the commit message, and capitalize the first word.

``` bash
# You should now have a single commit on top of a commit in `develop`
git log
```
> **Note:** If there are merge conflicts, please resolve them now.

``` bash
# Use the same commit message as you did above
git commit -m 'Your message'
git rebase --continue
```

> **Important:** If you have no GPG keys set up, please follow [this tutorial](https://docs.github.com/en/authentication/managing-commit-signature-verification/adding-a-gpg-key-to-your-github-account)

``` bash
# Sign the commit with your GPG key, and push your changes
git commit --amend -S
git push --force
```

## Use ccache (optional)
Clio uses ccache to speed up compilation. If you want to use it, please make sure it is installed on your machine.
CMake will automatically detect it and use it if it's available.
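
If CMake does not pick ccache up automatically in your environment, a generic CMake mechanism (not a Clio-specific option) is to point the compiler-launcher variables at ccache; a minimal sketch:

``` bash
# Sketch: force CMake to wrap compiler invocations with ccache.
# CMAKE_C_COMPILER_LAUNCHER / CMAKE_CXX_COMPILER_LAUNCHER are standard CMake variables.
cmake -DCMAKE_C_COMPILER_LAUNCHER=ccache \
      -DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
      ..
```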

## Fixing issues found during code review
While your code is in review, it's possible that some changes will be requested by reviewer(s).
This section describes the process of adding your fixes.

We assume that you have already made the required changes on your feature branch.

``` bash
# Add the changed code
git add <paths to add>

# Add a [FOLD] commit message (so you remember to squash it later)
# while also signing it with your GPG key
git commit -S -m "[FOLD] Your commit message"

# And finally push your changes
git push
```

## After code review
When your PR is approved and ready to merge, use `Squash and merge`.
The button for that is near the bottom of the PR's page on GitHub.

> **Important:** Please leave the automatically-generated mention/link to the PR in the subject line **and** in the description field add `"Fix #ISSUE_ID"` (replacing `ISSUE_ID` with yours) if the PR fixes an issue.
> **Note:** See [issues](https://github.com/XRPLF/clio/issues) to find the `ISSUE_ID` for the feature/bug you were working on.

# Style guide
This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent.

## Formatting
Code must conform to `clang-format` version 17, unless the result would be unreasonably difficult to read or maintain.
To make your code conform, run `clang-format -i <your changed files>`.

## Avoid
* Proliferation of nearly identical code.
* Proliferation of new files and classes unless it improves readability and/or compilation time.
* Unmanaged memory allocation and raw pointers.
* Macros (unless they add significant value).
* Lambda patterns (unless these add significant value).
* CPU or architecture-specific code unless there is a good reason to include it; where it is used, guard it with macros and provide explanatory comments.
* Importing new libraries unless there is a very good reason to do so.

## Seek to
* Extend functionality of existing code rather than creating new code.
* Prefer readability over terseness where important logic is concerned.
* Inline functions that are not used, or not likely to be used, elsewhere in the codebase.
* Use clear and self-explanatory names for functions, variables, structs and classes.
* Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders.
* Provide as many comments as you feel a competent programmer would need to understand what your code does.

# Maintainers
Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc.

## Code Review
A PR must be reviewed and approved by at least one of the maintainers before it can be merged.

## Adding and Removing
New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and 50% participation are required. In the event of a tie vote, the addition of the new maintainer will be rejected.

Existing maintainers can resign, or be subject to a vote for removal at the behest of two existing maintainers. A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote.

## Existing Maintainers

* [cindyyan317](https://github.com/cindyyan317) (Ripple)
* [godexsoft](https://github.com/godexsoft) (Ripple)
* [kuznetsss](https://github.com/kuznetsss) (Ripple)
* [legleux](https://github.com/legleux) (Ripple)

## Honorable ex-Maintainers

* [cjcobb23](https://github.com/cjcobb23) (ex-Ripple)
* [natenichols](https://github.com/natenichols) (ex-Ripple)
16 Doxyfile Normal file
@@ -0,0 +1,16 @@
PROJECT_NAME = "Clio"
INPUT = ../src ../unittests
EXCLUDE_PATTERNS = *Test*.cpp *Test*.hpp
RECURSIVE = YES
HAVE_DOT = YES

QUIET = YES
WARNINGS = NO
WARN_NO_PARAMDOC = NO
WARN_IF_INCOMPLETE_DOC = NO
WARN_IF_UNDOCUMENTED = NO

GENERATE_LATEX = NO
GENERATE_HTML = YES

SORT_MEMBERS_CTORS_1ST = YES
344 README.md
@@ -1,78 +1,182 @@
**Status:** This software is in beta mode. We encourage anyone to try it out and
report any issues they discover. Version 1.0 coming soon.

# Clio
[](https://github.com/XRPLF/clio/actions/workflows/build.yml?query=branch%3Adevelop)
[](https://github.com/XRPLF/clio/actions/workflows/nightly.yml?query=branch%3Adevelop)
[](https://github.com/XRPLF/clio/actions/workflows/clang-tidy.yml?query=branch%3Adevelop)
[](https://app.codecov.io/gh/XRPLF/clio)

Clio is an XRP Ledger API server. Clio is optimized for RPC calls, over WebSocket or JSON-RPC.
Validated historical ledger and transaction data are stored in a more space-efficient format,
using up to 4 times less space than rippled. Clio can be configured to store data in Apache Cassandra or ScyllaDB,
allowing for scalable read throughput. Multiple Clio nodes can share access to the same dataset,
allowing for a highly available cluster of Clio nodes, without the need for redundant data storage or computation.

Clio offers the full rippled API, with the caveat that Clio by default only returns validated data.
This means that `ledger_index` defaults to `validated` instead of `current` for all requests.
Other non-validated data is also not returned, such as information about queued transactions.
For requests that require access to the p2p network, such as `fee` or `submit`, Clio automatically forwards the request to a rippled node and propagates the response back to the client.
To access non-validated data for *any* request, simply add `ledger_index: "current"` to the request, and Clio will forward the request to rippled.
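As an illustration, such a request could look like the following (the account value is a placeholder, and the exact parameters depend on the method being called):

```json
{
    "method": "account_info",
    "params": [{ "account": "[account]", "ledger_index": "current" }]
}
```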

Clio does not connect to the peer-to-peer network. Instead, Clio extracts data from a group of specified rippled nodes. Running Clio requires access to at least one rippled node
from which data can be extracted. The rippled node does not need to be running on the same machine as Clio.

## Help
Feel free to open an [issue](https://github.com/XRPLF/clio/issues) if you have a feature request or something doesn't work as expected.
If you have any questions about building, running, contributing, using clio, or anything else, you can always start a new [discussion](https://github.com/XRPLF/clio/discussions).

## Requirements
1. Access to a Cassandra cluster or ScyllaDB cluster. Can be local or remote.

2. Access to one or more rippled nodes. Can be local or remote.

## Building

Clio is built with CMake and uses Conan for managing dependencies.
It is written in C++20 and therefore requires a modern compiler.

## Prerequisites

### Minimum Requirements

- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55](https://conan.io/downloads.html)
- [CMake 3.16](https://cmake.org/download/)
- [**Optional**] [GCovr](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html) (needed for code coverage generation)
- [**Optional**] [CCache](https://ccache.dev/) (speeds up compilation if you are going to compile Clio often)

| Compiler    | Version |
|-------------|---------|
| GCC         | 11      |
| Clang       | 14      |
| Apple Clang | 14.0.3  |

### Conan configuration

Clio does not require anything but default settings in your Conan profile (`~/.conan/profiles/default`). It's best to have no extra flags specified.
> Mac example:
```
[settings]
os=Macos
os_build=Macos
arch=armv8
arch_build=armv8
compiler=apple-clang
compiler.version=14
compiler.libcxx=libc++
build_type=Release
compiler.cppstd=20
```
> Linux example:
```
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=11
compiler.libcxx=libstdc++11
build_type=Release
compiler.cppstd=20
```
### Artifactory

1. Make sure artifactory is set up with Conan
```sh
conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
```
Now you should be able to download the prebuilt `xrpl` package on some platforms.

You might need to edit the `~/.conan/remotes.json` file to ensure that this newly added artifactory is listed last. Otherwise you might see compilation errors when building the project with gcc version 13 (or newer).

2. Remove old packages you may have cached:
```sh
conan remove -f xrpl
```

## Building Clio

Navigate to Clio's root directory and run:
```sh
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=False
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```
If all goes well, `conan install` will find the required packages and `cmake` will do the rest. You should end up with `clio_server` and `clio_tests` in the `build` directory (the current directory).

> **Tip:** You can omit `-o tests=True` in the `conan install` command above if you don't want to build `clio_tests`.

> **Tip:** To generate a Code Coverage report, include `-o coverage=True` in the `conan install` command above, along with `-o tests=True` to enable tests. After running the `cmake` commands, execute `make clio_tests-ccov`. The coverage report will be found at `clio_tests-llvm-cov/index.html`.

## Building Clio with Docker

It is possible to build Clio using Docker if you don't want to install all the dependencies on your machine.
```sh
docker run -it rippleci/clio_ci:latest
git clone https://github.com/XRPLF/clio
mkdir build && cd build
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=False
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
cmake --build . --parallel 8 # or without the number if you feel extra adventurous
```

## Running
```sh
./clio_server config.json
```

Clio needs access to a rippled server. The config files of rippled and Clio need
to agree on a few settings, detailed below.
Clio needs to know:
- the IP of rippled
- the port on which rippled is accepting unencrypted WebSocket connections
- the port on which rippled is handling gRPC requests

rippled needs to open:
- a port to accept unencrypted WebSocket connections
- a port to handle gRPC requests, with the IP(s) of Clio specified in the `secure_gateway` entry

The example configs of rippled and Clio are set up such that minimal changes are
required. When running locally, the only change needed is to uncomment the `port_grpc`
section of the rippled config. When running Clio and rippled on separate machines,
in addition to uncommenting the `port_grpc` section, a few other steps must be taken:
1. change the `ip` of the first entry of `etl_sources` to the IP where your rippled
server is running (see the snippet below)
2. open a public, unencrypted WebSocket port on your rippled server
3. change the IP specified in `secure_gateway` of the `port_grpc` section of the rippled config
to the IP of your Clio server. This entry can take the form of a comma-separated list if
you are running multiple Clio nodes.
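
For step 1, the corresponding snippet of the Clio config would look like this (the IP is a placeholder; the ports follow the example config):

```json
"etl_sources": [
    {
        "ip": "[rippled ip]",
        "ws_port": "6006",
        "grpc_port": "50051"
    }
]
```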

In addition, the parameter `start_sequence` can be included and configured within the top level of the config file. This parameter specifies the sequence of the first ledger to extract if the database is empty. Note that ETL extracts ledgers in order and that no backfilling functionality currently exists, meaning Clio will not retroactively learn ledgers older than the one you specify. Choosing to specify this or not will yield the following behavior:
- If this setting is absent and the database is empty, ETL will start with the next ledger validated by the network.
- If this setting is present and the database is not empty, an exception is thrown.

In addition, the optional parameter `finish_sequence` can be added to the json file as well, specifying the last ledger to extract.

To add `start_sequence` and/or `finish_sequence` to the config.json file, place them at the same top level of precedence as other parameters (such as `database`, `etl_sources`, `read_only`, etc.) and specify each as an integer. Here is an example snippet from the config file:

```json
"start_sequence": 12345,
"finish_sequence": 54321
```

The parameters `ssl_cert_file` and `ssl_key_file` can also be added to the top level of the Clio config. `ssl_cert_file` specifies the filepath for your SSL cert while `ssl_key_file` specifies the filepath for your SSL key. It is up to you how to change ownership of these files for your designated Clio user. Your options include:
- Copying the two files as root somewhere that's accessible by the Clio user, then running `sudo chown` to your user
- Changing the permissions directly so they're readable by your Clio user
- Running Clio as root (strongly discouraged)

An example of how to specify `ssl_cert_file` and `ssl_key_file` in the config:

```json
"server": {
    "ip": "0.0.0.0",
    "port": 51233
},
"ssl_cert_file": "/full/path/to/cert.file",
"ssl_key_file": "/full/path/to/key.file"
```

Once your config files are ready, start rippled and Clio. It doesn't matter which you
start first, and it's fine to stop one or the other and restart at any given time.

@@ -84,7 +188,7 @@ the most recent ledger on the network, and then backfill. If Clio is extracting
from rippled, and then rippled is stopped for a significant amount of time and then restarted, rippled
will take time to backfill to the next ledger that Clio wants. The time it takes is proportional
to the amount of time rippled was offline for. Also be aware that the amount rippled backfills
is dependent on the online_delete and ledger_history config values; if these values
are small, and rippled is stopped for a significant amount of time, rippled may never backfill
to the ledger that Clio wants. To avoid this situation, it is advised to keep history
proportional to the amount of time that you expect rippled to be offline. For example, if you
@@ -106,7 +210,7 @@ This can take some time, and depends on database throughput. With a moderately f
database, this should take less than 10 minutes. If you did not properly set `secure_gateway`
in the `port_grpc` section of rippled, this step will fail. Once the first ledger
is fully downloaded, Clio only needs to extract the changed data for each ledger,
so extraction is much faster and Clio can keep up with rippled in real time. Even under
intense load, Clio should not lag behind the network, as Clio is not processing the data,
and is simply writing to a database. The throughput of Clio is dependent on the throughput
of your database, but a standard Cassandra or Scylla deployment can handle
@@ -140,3 +244,167 @@ are doing this, be aware that database traffic will be flowing across regions,
which can cause high latencies. A possible alternative to this is to just deploy
a database in each region, and have the Clio nodes in each region use their region's database.
This is effectively two systems.

Clio supports API versioning as [described here](https://xrpl.org/request-formatting.html#api-versioning).
It's possible to configure the `minimum`, `maximum` and `default` version like so:
```json
"api_version": {
    "min": 1,
    "max": 2,
    "default": 1
}
```
All of the above are optional.
Clio will fall back to hardcoded defaults when these are not specified in the config file, or when the configured values are outside
of the minimum and maximum supported versions hardcoded in `src/rpc/common/APIVersion.h`.
> **Note:** See `example-config.json` for more details.

## Admin rights for requests

By default, clio checks admin privileges using the IP address of the request (only `127.0.0.1` is considered to be an admin).
This is not very secure because the IP could be spoofed.
For better security, an `admin_password` can be provided in the `server` section of clio's config:
```json
"server": {
    "admin_password": "secret"
}
```
If the password is present in the config, clio will check the Authorization header (if any) in each request for the password.
The Authorization header should contain the type `Password` and the password from the config, e.g. `Password secret`.
Only an exactly matching password grants admin rights for the request or a WebSocket connection.
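
As a quick illustration, this is how a request might present the password over HTTP; a minimal sketch using `curl`, where the port and password are placeholder values taken from the examples in this document:

```sh
# Sketch: issue an admin JSON-RPC request with the configured password.
curl -s -H 'Authorization: Password secret' \
     -d '{"method": "server_info", "params": [{}]}' \
     http://localhost:51233/
```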

## Prometheus metrics collection

Clio natively supports Prometheus metrics collection. It accepts Prometheus requests on the port configured in the `server` section of the config.
Prometheus metrics are enabled by default. To disable them, add `"prometheus": { "enabled": false }` to the config.
It is important to know that clio responds to Prometheus requests only if they are admin requests, so Prometheus should be configured to send the admin password in the header.
There is an example docker-compose file, along with Prometheus and Grafana configs, in [examples/infrastructure](examples/infrastructure).
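
To verify the setup by hand, you can pull the metrics yourself; the sketch below assumes the conventional Prometheus `/metrics` path and the example port and password used elsewhere in this document:

```sh
# Sketch: fetch Prometheus metrics as an admin request.
curl -s -H 'Authorization: Password secret' http://localhost:51233/metrics
```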

## Using clang-tidy for static analysis

The minimum clang-tidy version required is 17.0.
Clang-tidy can be run by cmake while building the project.
To do so, provide the option `-o lint=True` to the `conan install` command:
```sh
conan install .. --output-folder . --build missing --settings build_type=Release -o tests=True -o lint=True
```
By default cmake will try to find clang-tidy automatically in your system.
To force cmake to use a specific binary, set the `CLIO_CLANG_TIDY_BIN` environment variable to the path of the clang-tidy binary.
E.g.:
```sh
export CLIO_CLANG_TIDY_BIN=/opt/homebrew/opt/llvm@17/bin/clang-tidy
```

## Coverage report

The coverage report is intended for developers using the GCC
or Clang compilers (including Apple Clang). It is generated by the build target `coverage_report`,
which is only enabled when both the `tests` and `coverage` options are set, e.g. with
`-o coverage=True -o tests=True` in `conan`.

Prerequisites for the coverage report:

- [gcovr tool](https://gcovr.com/en/stable/getting-started.html) (can be installed e.g. with `pip install gcovr`)
- `gcov` for GCC (installed with the compiler by default) or
- `llvm-cov` for Clang (installed with the compiler by default, also on Apple)
- `Debug` build type

The coverage report is created when the following steps are completed, in order:

1. `clio_tests` binary built with the instrumentation data, enabled by the `coverage`
option mentioned above
2. completed run of unit tests, which populates coverage capture data
3. completed run of the `gcovr` tool (which internally invokes either `gcov` or `llvm-cov`)
to assemble both instrumentation data and coverage capture data into a coverage report

The above steps are automated into a single target, `coverage_report`. The instrumented
`clio_tests` binary can also be used for running regular unit tests. In case of a
spurious failure of unit tests, it is possible to re-run the `coverage_report` target without
rebuilding the `clio_tests` binary (since it is simply a dependency of the coverage report target).

The default coverage report format is `html-details`, but developers
can override it to any of the formats listed in `CMake/CodeCoverage.cmake`
by setting the `CODE_COVERAGE_REPORT_FORMAT` variable in `cmake`. For example, CI
sets this parameter to `xml` for the [codecov](codecov.io) integration.

If some unit tests predictably fail, e.g. due to the absence of a Cassandra database, it is possible
to set unit test options in the `CODE_COVERAGE_TESTS_ARGS` cmake variable, as demonstrated below:

```
cd .build
conan install .. --output-folder . --build missing --settings build_type=Debug -o tests=True -o coverage=True
cmake -DCODE_COVERAGE_REPORT_FORMAT=json-details -DCMAKE_BUILD_TYPE=Debug -DCODE_COVERAGE_TESTS_ARGS="--gtest_filter=-BackendCassandra*" -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ..
cmake --build . --target coverage_report
```

After the `coverage_report` target completes, the generated coverage report will be
stored inside the build directory, as either:

- a file named `coverage_report.*`, with a suitable extension for the report format, or
- a directory named `coverage_report`, with `index.html` and other files inside, for the `html-details` or `html-nested` report formats.


## Developing against `rippled` in standalone mode

If you wish to develop against a `rippled` instance running in standalone
mode, there are a few quirks of both clio and rippled that you need to keep in mind.
You must:

1. Advance the `rippled` ledger to at least ledger 256 (e.g. as sketched below)
2. Wait 10 minutes before first starting clio against this standalone node.
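
For step 1, one way to advance the ledger is rippled's `ledger_accept` admin command, which closes one ledger per call in standalone mode; a minimal sketch (the rippled binary path is an assumption):

```sh
# Sketch: advance a standalone rippled past ledger 256.
for i in $(seq 1 260); do
    ./rippled ledger_accept > /dev/null
done
```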

## Logging
Clio provides several logging options, all of which are configurable via the config file and detailed below.

`log_level`: The minimum severity at which log messages are output by default.
Severity options are `trace`, `debug`, `info`, `warning`, `error`, `fatal`. Defaults to `info`.

`log_format`: The format of log lines produced by clio. Defaults to `"%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%"`.
Each of the variables expands as follows:
- `TimeStamp`: The full date and time of the log entry
- `SourceLocation`: A partial path to the C++ file and the line number in said file (`source/file/path:linenumber`)
- `ThreadID`: The ID of the thread the log entry is written from
- `Channel`: The channel that this log entry was sent to
- `Severity`: The severity (aka log level) the entry was sent at
- `Message`: The actual log message

`log_channels`: An array of json objects, each overriding properties for a logging `channel`.
At the moment of writing, only `log_level` can be overridden using this mechanism.

Each object is of this format:
```json
{
    "channel": "Backend",
    "log_level": "fatal"
}
```
If no override is present for a given channel, that channel will log at the severity specified by the global `log_level`.
Overridable log channels: `Backend`, `WebServer`, `Subscriptions`, `RPC`, `ETL` and `Performance`.

> **Note:** See `example-config.json` for more details.

`log_to_console`: Enable/disable log output to console. Options are `true`/`false`. Defaults to `true`.

`log_directory`: Path to the directory where log files are stored. If the directory doesn't exist, Clio will create it. If not specified, logs are not written to a file.

`log_rotation_size`: The max size of the log file in **megabytes** before it will rotate into a smaller file. Defaults to 2GB.

`log_directory_max_size`: The max size of the log directory in **megabytes** before old log files will be
deleted to free up space. Defaults to 50GB.

`log_rotation_hour_interval`: The time interval in **hours** after the last log rotation to automatically
rotate the current log file. Defaults to 12 hours.

Note that time-based log rotation interacts with size-based log rotation: if a
size-based rotation occurs, the timer for the time-based rotation resets.

`log_tag_style`: Tag implementation to use. Must be one of:
- `uint`: Lock-free and thread-safe, but outputs just a simple unsigned integer
- `uuid`: Thread-safe and outputs a UUID tag
- `none`: Don't use tagging at all
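
Putting the options above together, an illustrative logging section of the config might look like this (values mirror the defaults described above, except for the channel override; the directory path is a placeholder):

```json
"log_level": "info",
"log_to_console": true,
"log_directory": "./logs",
"log_rotation_size": 2048,
"log_directory_max_size": 51200,
"log_rotation_hour_interval": 12,
"log_tag_style": "uint",
"log_channels": [
    {
        "channel": "Backend",
        "log_level": "fatal"
    }
]
```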

## Cassandra / Scylla Administration

Since Clio relies on either Cassandra or Scylla for its database backend, here are some important considerations:

- Scylla, by default, will reserve all free RAM on a machine for itself. If you are running `rippled` or other services on the same machine, restrict its memory usage using the `--memory` argument (see the sketch below): https://docs.scylladb.com/getting-started/scylla-in-a-shared-environment/
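
A sketch of such an invocation (the flag name follows the linked Scylla guide; the value is illustrative and should be sized for your machine):

```sh
# Sketch: cap Scylla's memory so it can coexist with rippled on one host.
scylla --memory 4G
```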

121 REVIEW.md
@@ -1,121 +0,0 @@
# How to review clio
Clio is a massive project, and thus I don't expect the code to be reviewed the
way a normal PR would. So I put this guide together to help reviewers look at
the relevant pieces of code without getting lost in the weeds.

One thing reviewers should keep in mind is that most of clio is designed to be
lightweight and simple. We try not to introduce any unnecessary complexity and
keep the code as simple and straightforward as possible. Sometimes complexity is
unavoidable, but simplicity is the goal.

## Order of review
The code is organized into 4 main components, each with their own folder. The
code in each folder is as self-contained as possible. A good way to approach
the review would be to review one folder at a time.

### backend
The code in the backend folder is the heart of the project, and reviewers should
start here. This is the most complex part of the code, as well as the most
performance sensitive. clio does not keep any data in memory, so performance
generally depends on the data model and the way we talk to the database.

Reviewers should start with the README in this folder to get a high level idea
of the data model and to review the data model itself. Then, reviewers should
dive into the implementation. The table schemas and queries for Cassandra are
defined in `CassandraBackend::open()`. The table schemas for Postgres are defined
in Pg.cpp. The queries for Postgres are defined in each of the functions of `PostgresBackend`.
A good way to approach the implementation would be to look at the table schemas,
and then go through the functions declared in `BackendInterface`. Reviewers could
also branch out to the rest of the code by looking at where these functions are
called from.

### webserver
The code in the webserver folder implements the web server for handling RPC requests.
This code was mostly copied and pasted from boost beast example code, so I would
really appreciate review here.

### rpc
The rpc folder contains all of the handlers and any helper functions they need.
This code is not too complicated, so reviewers don't need to dwell long here.

### etl
The etl folder contains all of the code for extracting data from rippled. This
code is complex and important, but most of this code was just copied from rippled
reporting mode, and thus has already been reviewed and is being used in prod.

## Design decisions that should be reviewed

### Data model
Reviewers should review the general data model. The data model itself is described
at a high level in the README in the backend folder. The table schemas and queries
for Cassandra are defined in the `open()` function of `CassandraBackend`. The table
schemas for Postgres are defined in Pg.cpp.

Particular attention should be paid to the keys table, and the problem that it solves
(successor/upper bound). I originally was going to have a special table for book_offers,
but then I decided that we could use the keys table itself for that and save space.
This makes book_offers somewhat slow compared to rippled, though still very usable.

### Large rows
I did some tricks with Cassandra to deal with very large rows in the keys and account_tx
tables. For each of these, the partition key (the first component of the primary
key) is a compound key. This is meant to break large rows into smaller rows. This
is done to avoid hotspots. Data is sharded in Cassandra, and if some rows get very
large, some nodes can have a lot more data than others.

For account_tx, this has performance implications when iterating very far back
in time. Refer to the `fetchAccountTransactions()` function in `CassandraBackend`.

It is unclear if this needs to be done for other tables.

### Postgres table partitioning
Originally, Postgres exhibited performance problems when the dataset approached 1
TB. This was solved by table partitioning.

### Threading
I used asio for multithreading. There are a lot of different io_contexts lying
around the code. This needs to be cleaned up a bit. Most of these are really
just ways to submit an async job to a single thread. I don't think it makes
sense to have one io_context for the whole application, but some of the threading
is a bit opaque and could be cleaned up.

### Boost Json
I used boost json for serializing data to json.

### No cache
As of now, there is no cache. I am not sure if a cache is even worth it. A
transaction cache would not be hard, but a cache for ledger data would be hard.
While a cache would improve performance, it would increase memory usage. clio
is designed to be lightweight. Also, I've reached thousands of requests per
second with a single clio node, so I'm not sure performance is even an issue.

## Things I'm less than happy about

#### BackendIndexer
This is a particularly hairy piece of code that handles writing to the keys table.
I am not too happy with this code. Parts of it need to execute in real time as
part of ETL, and other parts are allowed to run in the background. There is also
code that detects if a previous background job failed to complete before the
server shutdown, and thus tries to rerun that job. The code feels tacked on, and
I would like it to be more cleanly integrated with the rest of the code.

#### Shifting
There is some bit shifting going on with the keys table and the account_tx table.
The keys table is written to every 2^20 ledgers. Maybe it would be better to just
write every 1 million ledgers.

#### Performance of book_offers
book_offers is a bit slow. It could be sped up in a variety of ways. One is to
keep a separate book_offers table. However, this is not straightforward and will
use more space. Another is to keep a cache of book_offers for the most recent ledger
(or few ledgers). I am not sure if this is worth it.

#### account_tx in Cassandra
After the fix to deal with large rows, account_tx can be slow at times when using
Cassandra. Specifically, if there are large gaps in time where the account was
not affected by any transactions, the code will be reading empty records. I would
like to sidestep this issue if possible.

#### Implementation of fetchLedgerPage
`fetchLedgerPage()` is rather complex. Part of this seems unavoidable, since this
code is dealing with the keys table.
@@ -1,38 +1,43 @@
/*
 * This is an example configuration file. Please do not use without modifying to suit your needs.
 */
{
    "database": {
        "type": "cassandra",
        "cassandra": {
            // This option can be used to set up a secure connect bundle connection
            "secure_connect_bundle": "[path/to/zip. ignore if using contact_points]",
            // The following options are used only if using contact_points
            "contact_points": "[ip. ignore if using secure_connect_bundle]",
            "port": "[port. ignore if using secure_connect_bundle]",
            // Authentication settings
            "username": "[username, if any]",
            "password": "[password, if any]",
            // Other common settings
            "keyspace": "clio",
            "max_write_requests_outstanding": 25000,
            "max_read_requests_outstanding": 30000,
            "threads": 8
        }
    },
    "etl_sources": [
        {
            "ip": "[rippled ip]",
            "ws_port": "6006",
            "grpc_port": "50051"
        }
    ],
    "dos_guard": {
        "whitelist": [
            "127.0.0.1"
        ]
    },
    "server": {
        "ip": "0.0.0.0",
        "port": 8080
    },
    "log_level": "debug",
    "log_file": "./clio.log",
    "extractor_threads": 8,
    "read_only": false
}

92 conanfile.py Normal file
@@ -0,0 +1,92 @@
from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout


class Clio(ConanFile):
    name = 'clio'
    license = 'ISC'
    author = 'Alex Kremer <akremer@ripple.com>, John Freeman <jfreeman@ripple.com>'
    url = 'https://github.com/xrplf/clio'
    description = 'Clio RPC server'
    settings = 'os', 'compiler', 'build_type', 'arch'
    options = {
        'fPIC': [True, False],
        'verbose': [True, False],
        'tests': [True, False],  # build unit tests; create `clio_tests` binary
        'docs': [True, False],  # doxygen API docs; create custom target 'docs'
        'packaging': [True, False],  # create distribution packages
        'coverage': [True, False],  # build for test coverage report; create custom target `clio_tests-ccov`
        'lint': [True, False],  # run clang-tidy checks during compilation
    }

    requires = [
        'boost/1.82.0',
        'cassandra-cpp-driver/2.17.0',
        'fmt/10.1.1',
        'protobuf/3.21.9',
        'grpc/1.50.1',
        'openssl/1.1.1u',
        'xrpl/2.2.0',
        'libbacktrace/cci.20210118'
    ]

    default_options = {
        'fPIC': True,
        'verbose': False,
        'tests': False,
        'packaging': False,
        'coverage': False,
        'lint': False,
        'docs': False,

        'xrpl/*:tests': False,
        'cassandra-cpp-driver/*:shared': False,
        'date/*:header_only': True,
        'grpc/*:shared': False,
        'grpc/*:secure': True,
        'libpq/*:shared': False,
        'lz4/*:shared': False,
        'openssl/*:shared': False,
        'protobuf/*:shared': False,
        'protobuf/*:with_zlib': True,
        'snappy/*:shared': False,
        'gtest/*:no_main': True,
    }

    exports_sources = (
        'CMakeLists.txt', 'CMake/*', 'src/*'
    )

    def requirements(self):
        if self.options.tests:
            self.requires('gtest/1.14.0')

    def configure(self):
        if self.settings.compiler == 'apple-clang':
            self.options['boost'].visibility = 'global'

    def layout(self):
        cmake_layout(self)
        # Fix this setting to follow the default introduced in Conan 1.48
        # to align with our build instructions.
        self.folders.generators = 'build/generators'

    generators = 'CMakeDeps'

    def generate(self):
        tc = CMakeToolchain(self)
        tc.variables['verbose'] = self.options.verbose
        tc.variables['tests'] = self.options.tests
        tc.variables['coverage'] = self.options.coverage
        tc.variables['lint'] = self.options.lint
        tc.variables['docs'] = self.options.docs
        tc.variables['packaging'] = self.options.packaging
        tc.generate()

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def package(self):
        cmake = CMake(self)
        cmake.install()
49 docker/centos/Dockerfile Normal file
@@ -0,0 +1,49 @@
# FROM centos:7 as deps
FROM centos:7 as build

ENV CLIO_DIR=/opt/clio/
# ENV OPENSSL_DIR=/opt/openssl

RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
RUN yum install -y devtoolset-11
ENV version=3.16
ENV build=3
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
COPY docker/shared/install_cmake.sh /install_cmake.sh
RUN /install_cmake.sh 3.16.3 /usr/local
RUN source /opt/rh/devtoolset-11/enable
WORKDIR /tmp
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
COPY docker/centos/build_git_centos7.sh build_git_centos7.sh

RUN ./build_git_centos7.sh
RUN git clone https://github.com/openssl/openssl
WORKDIR /tmp/openssl
RUN git checkout OpenSSL_1_1_1q
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
make -j $(nproc) && \
make install_sw
WORKDIR /tmp
# FROM centos:7 as build

RUN git clone https://github.com/xrplf/clio.git
COPY docker/shared/build_boost.sh build_boost.sh
ENV OPENSSL_ROOT=/opt/local/openssl
ENV BOOST_ROOT=/boost
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
RUN yum install -y bison flex
RUN yum install -y rpmdevtools rpmlint
RUN source /opt/rh/devtoolset-11/enable && cd /tmp/clio && \
cmake -B build -DBUILD_TESTS=1 && \
cmake --build build --parallel $(nproc)
RUN mkdir output
RUN strip clio/build/clio_server && strip clio/build/clio_tests
RUN cp clio/build/clio_tests output/ && cp clio/build/clio_server output/
RUN cp clio/example-config.json output/example-config.json

FROM centos:7
COPY --from=build /tmp/output /clio
RUN mkdir -p /opt/clio/etc && mv /clio/example-config.json /opt/clio/etc/config.json

CMD ["/clio/clio_server", "/opt/clio/etc/config.json"]
18 docker/centos/build_git_centos7.sh Executable file
@@ -0,0 +1,18 @@
#!/usr/bin/env bash

set -ex
GIT_VERSION="2.37.1"
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
tar zxvf git-${GIT_VERSION}.tar.gz
cd git-${GIT_VERSION}

yum install -y centos-release-scl epel-release
yum update -y
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel

source /opt/rh/devtoolset-11/enable
make configure
./configure
make git -j$(nproc)
make install git
git --version | cut -d ' ' -f3
11 docker/centos/install_cmake.sh Executable file
@@ -0,0 +1,11 @@
#!/bin/bash

set -eo pipefail

CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
67 docker/ci/dockerfile Normal file
@@ -0,0 +1,67 @@
FROM ubuntu:focal
ARG DEBIAN_FRONTEND=noninteractive
ARG TARGETARCH

USER root
WORKDIR /root/

ENV GCC_VERSION=11 \
CCACHE_VERSION=4.8.3 \
LLVM_TOOLS_VERSION=17 \
GH_VERSION=2.40.0

# Add repositories
RUN apt-get -qq update \
&& apt-get -qq install -y --no-install-recommends --no-install-suggests gnupg wget curl software-properties-common \
&& add-apt-repository -y ppa:ubuntu-toolchain-r/test \
&& wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | apt-key add - \
&& apt-add-repository 'deb https://apt.kitware.com/ubuntu/ focal main' \
&& echo "deb http://apt.llvm.org/focal/ llvm-toolchain-focal-${LLVM_TOOLS_VERSION} main" >> /etc/apt/sources.list \
&& wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -

# Install packages
RUN apt update -qq \
&& apt install -y --no-install-recommends --no-install-suggests cmake python3 python3-pip sudo git \
ninja-build make pkg-config libzstd-dev libzstd1 g++-${GCC_VERSION} jq \
clang-format-${LLVM_TOOLS_VERSION} clang-tidy-${LLVM_TOOLS_VERSION} clang-tools-${LLVM_TOOLS_VERSION} \
&& update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcov gcov /usr/bin/gcov-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-dump-${GCC_VERSION} 100 \
&& update-alternatives --install /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-tool-${GCC_VERSION} 100 \
&& apt-get clean && apt remove -y software-properties-common \
&& pip3 install -q --upgrade --no-cache-dir pip \
&& pip3 install -q --no-cache-dir conan==1.62 gcovr

# Install ccache from source
WORKDIR /tmp
RUN wget "https://github.com/ccache/ccache/releases/download/v${CCACHE_VERSION}/ccache-${CCACHE_VERSION}.tar.gz" \
&& tar xf "ccache-${CCACHE_VERSION}.tar.gz" \
&& cd "ccache-${CCACHE_VERSION}" \
&& mkdir build && cd build \
&& cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \
&& ninja && cp ./ccache /usr/bin/ccache

# Install gh
RUN wget https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
&& tar xf gh_${GH_VERSION}_linux_${TARGETARCH}.tar.gz \
&& mv gh_${GH_VERSION}_linux_${TARGETARCH}/bin/gh /usr/bin/gh

# Clean up
RUN rm -rf /tmp/* /var/tmp/*

WORKDIR /root/
# Using root by default is not very secure but github checkout action doesn't work with any other user
# https://github.com/actions/checkout/issues/956
# And Github Actions doc recommends using root
# https://docs.github.com/en/actions/creating-actions/dockerfile-support-for-github-actions#user

# Setup conan
RUN conan profile new default --detect \
&& conan profile update settings.compiler.cppstd=20 default \
&& conan profile update settings.compiler.libcxx=libstdc++11 default \
&& conan remote add --insert 0 conan-non-prod http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod

13 docker/clio_docker/centos/build_boost.sh Executable file
@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -exu

#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
# it's either those or link=static that halves the failures. probably link=static
BOOST_VERSION=$1
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
echo "BOOST_VERSION: ${BOOST_VERSION}"
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
mkdir -p /boost && mv boost /boost && mv stage /boost
18 docker/clio_docker/centos/build_git_centos7.sh Executable file
@@ -0,0 +1,18 @@
#!/usr/bin/env bash

set -ex
GIT_VERSION="2.37.1"
curl -OJL https://github.com/git/git/archive/refs/tags/v${GIT_VERSION}.tar.gz
tar zxvf git-${GIT_VERSION}.tar.gz
cd git-${GIT_VERSION}

yum install -y centos-release-scl epel-release
yum update -y
yum install -y devtoolset-11 autoconf gnu-getopt gettext zlib-devel libcurl-devel

source /opt/rh/devtoolset-11/enable
make configure
./configure
make git -j$(nproc)
make install git
git --version | cut -d ' ' -f3
34 docker/clio_docker/centos/dockerfile Normal file
@@ -0,0 +1,34 @@
FROM centos:7

ENV CLIO_DIR=/opt/clio/
# ENV OPENSSL_DIR=/opt/openssl

RUN yum -y install git epel-release centos-release-scl perl-IPC-Cmd openssl
RUN yum install -y devtoolset-11
ENV version=3.16
ENV build=3
# RUN curl -OJL https://cmake.org/files/v$version/cmake-$version.$build.tar.gz
COPY install_cmake.sh /install_cmake.sh
RUN /install_cmake.sh 3.16.3 /usr/local
RUN source /opt/rh/devtoolset-11/enable
WORKDIR /tmp
# RUN mkdir $OPENSSL_DIR && cd $OPENSSL_DIR
COPY build_git_centos7.sh build_git_centos7.sh

RUN ./build_git_centos7.sh
RUN git clone https://github.com/openssl/openssl
WORKDIR /tmp/openssl
RUN git checkout OpenSSL_1_1_1q
#--prefix=/usr --openssldir=/etc/ssl --libdir=lib no-shared zlib-dynamic
RUN SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") && ./config -fPIC --prefix=/usr --openssldir=${SSLDIR} zlib shared && \
make -j $(nproc) && \
make install_sw
WORKDIR /tmp
RUN git clone https://github.com/xrplf/clio.git
COPY build_boost.sh build_boost.sh
ENV OPENSSL_ROOT=/opt/local/openssl
ENV BOOST_ROOT=/boost
RUN source scl_source enable devtoolset-11 && /tmp/build_boost.sh 1.75.0
RUN yum install -y bison flex
RUN source /opt/rh/devtoolset-11/enable && \
cd /tmp/clio && cmake -B build -Dtests=0 -Dlocal_libarchive=1 -Dunity=0 -DBUILD_TESTS=0 && cmake --build build --parallel $(nproc)
11 docker/clio_docker/centos/install_cmake.sh Executable file
@@ -0,0 +1,11 @@
#!/bin/bash

set -eo pipefail

CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
13 docker/shared/build_boost.sh Executable file
@@ -0,0 +1,13 @@
#!/usr/bin/env bash
set -exu

#yum install wget lz4 lz4-devel git llvm13-static.x86_64 llvm13-devel.x86_64 devtoolset-11-binutils zlib-static
# it's either those or link=static that halves the failures. probably link=static
BOOST_VERSION=$1
BOOST_VERSION_=$(echo ${BOOST_VERSION} | tr . _)
echo "BOOST_VERSION: ${BOOST_VERSION}"
echo "BOOST_VERSION_: ${BOOST_VERSION_}"
curl -OJLs "https://boostorg.jfrog.io/artifactory/main/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION_}.tar.gz"
tar zxf "boost_${BOOST_VERSION_}.tar.gz"
cd boost_${BOOST_VERSION_} && ./bootstrap.sh && ./b2 --without-python link=static -j$(nproc)
mkdir -p /boost && mv boost /boost && mv stage /boost
11 docker/shared/install_cmake.sh Executable file
@@ -0,0 +1,11 @@
#!/bin/bash

set -eo pipefail

CMAKE_VERSION=${1:-"3.16.3"}
cd /tmp
URL="https://github.com/Kitware/CMake/releases/download/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}-linux-x86_64.tar.gz"
curl -OJLs $URL
tar xzvf cmake-${CMAKE_VERSION}-Linux-x86_64.tar.gz
mv cmake-${CMAKE_VERSION}-Linux-x86_64 /opt/
ln -s /opt/cmake-${CMAKE_VERSION}-Linux-x86_64/bin/cmake /usr/local/bin/cmake
3 docker/shared/install_openssl.sh Executable file
@@ -0,0 +1,3 @@
#!/bin/bash

set -e
24 docker/ubuntu/Dockerfile Normal file
@@ -0,0 +1,24 @@
FROM ubuntu:20.04 AS boost
|
||||
|
||||
RUN apt-get update && apt-get install -y build-essential
|
||||
ARG BOOST_VERSION_=1_75_0
|
||||
ARG BOOST_VERSION=1.75.0
|
||||
COPY docker/shared/build_boost.sh .
|
||||
RUN apt install -y curl
|
||||
RUN ./build_boost.sh ${BOOST_VERSION}
|
||||
ENV BOOST_ROOT=/boost
|
||||
|
||||
FROM ubuntu:20.04 AS build
|
||||
ENV BOOST_ROOT=/boost
|
||||
COPY --from=boost /boost /boost
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y build-essential software-properties-common pkg-config libssl-dev wget curl gpg git zlib1g-dev bison flex autoconf lsb-release
|
||||
RUN apt install -y gpg-agent
|
||||
RUN wget https://apt.llvm.org/llvm.sh
|
||||
RUN chmod +x llvm.sh && ./llvm.sh 14 && ./llvm.sh 15
|
||||
# COPY . /clio
|
||||
## Install cmake
|
||||
ARG CMAKE_VERSION=3.16.3
|
||||
COPY docker/shared/install_cmake.sh .
|
||||
RUN ./install_cmake.sh ${CMAKE_VERSION}
|
||||
ENV PATH="/opt/local/cmake/bin:$PATH"
|
||||
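Because the Dockerfile COPYs `docker/shared/*.sh`, the build context must be the repository root; a plausible invocation (the image tag here is illustrative, not part of the change):

```bash
docker build -f docker/ubuntu/Dockerfile -t clio-build:ubuntu20.04 .
```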
@@ -1,37 +1,130 @@
 /*
  * This is an example configuration file. Please do not use without modifying to suit your needs.
  */
 {
-    "database":
-    {
-        "type":"cassandra",
-        "cassandra":
-        {
-            "contact_points":"127.0.0.1",
-            "port":9042,
-            "keyspace":"clio",
-            "replication_factor":1,
-            "table_prefix":"",
-            "max_requests_outstanding":25000,
-            "threads":8
+    "database": {
+        "type": "cassandra",
+        "cassandra": {
+            "contact_points": "127.0.0.1",
+            "port": 9042,
+            "keyspace": "clio",
+            "replication_factor": 1,
+            "table_prefix": "",
+            "max_write_requests_outstanding": 25000,
+            "max_read_requests_outstanding": 30000,
+            "threads": 8,
+            //
+            // Advanced options. USE AT OWN RISK:
+            // ---
+            "core_connections_per_host": 1, // Defaults to 1
+            "write_batch_size": 20 // Defaults to 20
+            //
+            // The options below use the cassandra driver's defaults if left unspecified.
+            // See https://docs.datastax.com/en/developer/cpp-driver/2.17/api/struct.CassCluster/ for details.
+            //
+            // "queue_size_io": 2
+            //
+            // ---
         }
     },
-    "etl_sources":
-    [
+    "allow_no_etl": false, // Allow Clio to run without a valid ETL source; otherwise Clio will stop if the ETL check fails
+    "etl_sources": [
         {
-            "ip":"127.0.0.1",
-            "ws_port":"6006",
-            "grpc_port":"50051"
+            "ip": "127.0.0.1",
+            "ws_port": "6006",
+            "grpc_port": "50051"
         }
     ],
-    "dos_guard":
-    {
-        "whitelist":["127.0.0.1"]
+    "dos_guard": {
+        // Comma-separated list of IPs to exclude from rate limiting
+        "whitelist": [
+            "127.0.0.1"
+        ],
+        //
+        // The below values are the default values and are only specified here
+        // for documentation purposes. The rate limiter currently limits
+        // connections and bandwidth per IP. The rate limiter looks at the raw
+        // IP of a client connection, and so requests routed through a load
+        // balancer will all have the same IP and be treated as a single client.
+        //
+        "max_fetches": 1000000, // Max bytes per IP per sweep interval
+        "max_connections": 20, // Max connections per IP
+        "max_requests": 20, // Max requests per IP per sweep interval
+        "sweep_interval": 1 // Time in seconds before resetting max_fetches and max_requests
     },
-    "server":{
-        "ip":"0.0.0.0",
-        "port":51233
+    "cache": {
+        // Comma-separated list of peer nodes that Clio can use to download the cache from at startup
+        "peers": [
+            {
+                "ip": "127.0.0.1",
+                "port": 51234
+            }
+        ]
     },
-    "log_level":"debug",
-    "log_file":"./clio.log",
-    "online_delete":0,
-    "extractor_threads":8,
-    "read_only":false
+    "server": {
+        "ip": "0.0.0.0",
+        "port": 51233,
+        // Max number of requests to queue up before rejecting further requests.
+        // Defaults to 0, which disables the limit.
+        "max_queue_size": 500,
+        // If a request carries an Authorization header, Clio checks whether it matches 'Password ' + the sha256 hash of this value.
+        // If it matches, the request is treated as an admin request.
+        "admin_password": "xrp",
+        // If local_admin is true, Clio treats requests coming from 127.0.0.1 as admin requests.
+        // It's true by default unless admin_password is set; 'local_admin': true and 'admin_password' cannot be set at the same time.
+        "local_admin": false
+    },
+    // Overrides the log level per logging channel.
+    // Defaults to the global "log_level" for each unspecified channel.
+    "log_channels": [
+        {
+            "channel": "Backend",
+            "log_level": "fatal"
+        },
+        {
+            "channel": "WebServer",
+            "log_level": "info"
+        },
+        {
+            "channel": "Subscriptions",
+            "log_level": "info"
+        },
+        {
+            "channel": "RPC",
+            "log_level": "error"
+        },
+        {
+            "channel": "ETL",
+            "log_level": "debug"
+        },
+        {
+            "channel": "Performance",
+            "log_level": "trace"
+        }
+    ],
+    "prometheus": {
+        "enabled": true,
+        "compress_reply": true
+    },
+    "log_level": "info",
+    // Log format (this is the default format)
+    "log_format": "%TimeStamp% (%SourceLocation%) [%ThreadID%] %Channel%:%Severity% %Message%",
+    "log_to_console": true,
+    // Clio logs to file in the specified directory only if "log_directory" is set
+    // "log_directory": "./clio_log",
+    "log_rotation_size": 2048,
+    "log_directory_max_size": 51200,
+    "log_rotation_hour_interval": 12,
+    "log_tag_style": "uint",
+    "extractor_threads": 8,
+    "read_only": false,
+    // "start_sequence": [integer] the ledger index to start from,
+    // "finish_sequence": [integer] the ledger index to finish at,
+    // "ssl_cert_file": "/full/path/to/cert.file",
+    // "ssl_key_file": "/full/path/to/key.file"
+    "api_version": {
+        "min": 1, // Minimum API version supported (could be 1 or 2)
+        "max": 2, // Maximum API version supported (could be 1 or 2, but >= min)
+        "default": 1 // Clio behaves the same as rippled by default
+    }
 }
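The `admin_password` setting above works together with the Prometheus scrape config further down: a client authenticates by sending `Password ` followed by the sha256 hash of the configured value. A minimal sketch of deriving that credential for the example value `xrp`:

```bash
# sha256 of the admin_password value; this is the string that follows
# 'Password ' in the Authorization header (and fills `credentials` in
# prometheus.yaml below).
echo -n 'xrp' | shasum -a 256
# 0e1dcf1ff020cceabf8f4a60a32e814b5b46ee0bb8cd4af5c814e4071bd86a18  -
```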
examples/infrastructure/README.md (Normal file, 25 lines)
@@ -0,0 +1,25 @@
# Example of clio monitoring infrastructure

This directory contains an example of docker-based infrastructure to collect and visualise metrics from clio.

The structure of the directory:
- `compose.yaml`
  Docker-compose file with Prometheus and Grafana set up.
- `prometheus.yaml`
  Defines metrics collection from Clio and Prometheus itself.
  Demonstrates how to set up a Clio target and Clio's admin authorisation in Prometheus.
- `grafana/clio_dashboard.json`
  JSON file containing a preconfigured dashboard in Grafana format.
- `grafana/dashboard_local.yaml`
  Grafana configuration file defining the directory to search for dashboard JSON files.
- `grafana/datasources.yaml`
  Grafana configuration file defining Prometheus as a data source for Grafana.

## How to try

1. Make sure you have `docker` and `docker-compose` installed.
2. Run `docker-compose up -d` from this directory. It will start docker containers with Prometheus and Grafana.
3. Open [http://localhost:3000/dashboards](http://localhost:3000/dashboards). Grafana login `admin`, password `grafana`.
   There will be a preconfigured Clio dashboard.

If Clio is not running yet, launch Clio to see metrics. Some of the metrics may appear only after requests to Clio.
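The steps above condense to two standard docker-compose commands:

```bash
docker-compose up -d    # starts Prometheus on :9090 and Grafana on :3000
docker-compose ps       # both containers should show as Up
```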
examples/infrastructure/compose.yaml (Normal file, 20 lines)
@@ -0,0 +1,20 @@
services:
  prometheus:
    image: prom/prometheus
    ports:
      - 9090:9090
    volumes:
      - ./prometheus.yaml:/etc/prometheus/prometheus.yml
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
  grafana:
    image: grafana/grafana
    ports:
      - 3000:3000
    environment:
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_ADMIN_PASSWORD=grafana
    volumes:
      - ./grafana/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
      - ./grafana/dashboard_local.yaml:/etc/grafana/provisioning/dashboards/local.yaml
      - ./grafana/clio_dashboard.json:/var/lib/grafana/dashboards/clio_dashboard.json
examples/infrastructure/grafana/clio_dashboard.json (Normal file, 1240 lines)
File diff suppressed because it is too large
examples/infrastructure/grafana/dashboard_local.yaml (Normal file, 23 lines)
@@ -0,0 +1,23 @@
apiVersion: 1

providers:
  - name: 'Clio dashboard'
    # <int> Org id. Defaults to 1
    orgId: 1
    # <string> name of the dashboard folder.
    folder: ''
    # <string> folder UID. Will be automatically generated if not specified
    folderUid: ''
    # <string> provider type. Defaults to 'file'
    type: file
    # <bool> disable dashboard deletion
    disableDeletion: false
    # <int> how often Grafana will scan for changed dashboards
    updateIntervalSeconds: 10
    # <bool> allow updating provisioned dashboards from the UI
    allowUiUpdates: false
    options:
      # <string, required> path to dashboard files on disk. Required when using the 'file' type
      path: /var/lib/grafana/dashboards
      # <bool> use folder names from filesystem to create folders in Grafana
      foldersFromFilesStructure: true
examples/infrastructure/grafana/datasources.yaml (Normal file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    url: http://prometheus:9090
    isDefault: true
    access: proxy
examples/infrastructure/prometheus.yaml (Normal file, 19 lines)
@@ -0,0 +1,19 @@
scrape_configs:
  - job_name: clio
    scrape_interval: 5s
    scrape_timeout: 5s
    authorization:
      type: Password
      # sha256sum of the password `xrp`
      # use `echo -n 'your_password' | shasum -a 256` to get the hash
      credentials: 0e1dcf1ff020cceabf8f4a60a32e814b5b46ee0bb8cd4af5c814e4071bd86a18
    static_configs:
      - targets:
          - host.docker.internal:51233
  - job_name: prometheus
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    static_configs:
      - targets:
          - localhost:9090
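Once the stack is running, one way to confirm that Prometheus accepted this file and registered the `clio` job is its standard targets API (the endpoint is part of Prometheus, not this change):

```bash
# Lists configured scrape targets; the clio job should appear here.
curl -s http://localhost:9090/api/v1/targets | grep -o '"job":"clio"'
```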
metrics.py (183 lines)
@@ -1,183 +0,0 @@
#!/usr/bin/python3
import argparse

from datetime import datetime


def getTime(line):
    bracketOpen = line.find("[")
    bracketClose = line.find("]")
    timestampSub = line[bracketOpen+1:bracketClose]
    timestamp = datetime.strptime(timestampSub, '%Y-%m-%d %H:%M:%S.%f')
    return timestamp.timestamp()


def parseAccountTx(filename):
    with open(filename) as f:
        totalProcTime = 0.0
        totalTxnTime = 0.0
        numCalls = 0
        for line in f:
            if "executed stored_procedure" in line:
                idx = line.find("in ")
                idx = idx + 3
                idx2 = line.find("num")
                procTime = float(line[idx:idx2])
                totalProcTime += procTime
            if "fetchTransactions fetched" in line:
                idx = line.find("took ")
                idx = idx + 5
                txnTime = float(line[idx:])
                totalTxnTime += txnTime
                numCalls = numCalls + 1
        print(totalProcTime)
        print(totalProcTime/numCalls)
        print(totalTxnTime)
        print(totalTxnTime/numCalls)


def parseLogs(filename, interval, minTxnCount = 0):
    with open(filename) as f:
        totalTime = 0
        totalTxns = 0
        totalObjs = 0
        totalLoadTime = 0

        start = 0
        end = 0
        totalLedgers = 0

        intervalTime = 0
        intervalTxns = 0
        intervalObjs = 0
        intervalLoadTime = 0

        intervalStart = 0
        intervalEnd = 0
        intervalLedgers = 0
        ledgersPerSecond = 0

        print("ledgers, transactions, objects, loadTime, loadTime/ledger, ledgers/sec, txns/sec, objs/sec")
        for line in f:
            if "Load phase" in line:
                sequenceIdx = line.find("Sequence : ")
                hashIdx = line.find(" Hash :")
                sequence = line[sequenceIdx + len("Sequence : "):hashIdx]
                txnCountSubstr = "txn count = "
                objCountSubstr = ". object count = "
                loadTimeSubstr = ". load time = "
                txnsSubstr = ". load txns per second = "
                objsSubstr = ". load objs per second = "
                txnCountIdx = line.find(txnCountSubstr)
                objCountIdx = line.find(objCountSubstr)
                loadTimeIdx = line.find(loadTimeSubstr)
                txnsIdx = line.find(txnsSubstr)
                objsIdx = line.find(objsSubstr)
                txnCount = line[txnCountIdx + len(txnCountSubstr):objCountIdx]
                objCount = line[objCountIdx + len(objCountSubstr):loadTimeIdx]
                loadTime = line[loadTimeIdx + len(loadTimeSubstr):txnsIdx]
                txnsPerSecond = line[txnsIdx + len(txnsSubstr):objsIdx]
                objsPerSecond = line[objsIdx + len(objsSubstr):-1]
                if int(txnCount) >= minTxnCount:
                    totalTime += float(loadTime)
                    totalTxns += float(txnCount)
                    totalObjs += float(objCount)
                    intervalTime += float(loadTime)
                    intervalTxns += float(txnCount)
                    intervalObjs += float(objCount)

                totalLoadTime += float(loadTime)
                intervalLoadTime += float(loadTime)

                if start == 0:
                    start = getTime(line)

                prevEnd = end
                end = getTime(line)

                if intervalStart == 0:
                    intervalStart = getTime(line)

                intervalEnd = getTime(line)

                totalLedgers += 1
                intervalLedgers += 1
                ledgersPerSecond = 0
                if end != start:
                    ledgersPerSecond = float(totalLedgers) / float((end - start))
                intervalLedgersPerSecond = 0
                if intervalEnd != intervalStart:
                    intervalLedgersPerSecond = float(intervalLedgers) / float((intervalEnd - intervalStart))

                if int(sequence) % interval == 0:
                    # print("Sequence = " + sequence + " : [time, txCount, objCount, txPerSec, objsPerSec]")
                    # print(loadTime + " , "
                    #       + txnCount + " , "
                    #       + objCount + " , "
                    #       + txnsPerSecond + " , "
                    #       + objsPerSecond)
                    # print("Interval Aggregate ( " + str(interval) + " ) [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]: ")
                    print(str(intervalLedgers) + " , "
                          + str(intervalTxns) + " , "
                          + str(intervalObjs) + " , "
                          + str(intervalLoadTime) + " , "
                          + str(intervalLoadTime/intervalLedgers) + " , "
                          + str(intervalLedgers/intervalLoadTime) + " , "
                          + str(intervalTxns/intervalLoadTime) + " , "
                          + str(intervalObjs/intervalLoadTime))
                # print("Total Aggregate: [ledgers, txns, objects, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]")
                # print(str(totalLedgers) + " , "
                #       + str(totalTxns) + " , "
                #       + str(totalObjs) + " , "
                #       + str(end-start) + " , "
                #       + str(ledgersPerSecond) + " , "
                #       + str(totalLoadTime/totalLedgers) + " , "
                #       + str(totalTxns/totalTime) + " , "
                #       + str(totalObjs/totalTime))
                if int(sequence) % interval == 0:
                    intervalTime = 0
                    intervalTxns = 0
                    intervalObjs = 0
                    intervalStart = 0
                    intervalEnd = 0
                    intervalLedgers = 0
                    intervalLoadTime = 0
        print("Total Aggregate: [ledgers, elapsedTime, ledgersPerSec, avgLoadTime, txPerSec, objsPerSec]")
        print(totalLedgers)
        print(totalLoadTime)
        print(str(totalLedgers) + " : "
              + str(end-start) + " : "
              + str(ledgersPerSecond) + " : "
              + str(totalLoadTime/totalLedgers) + " : "
              + str(totalTxns/totalTime) + " : "
              + str(totalObjs/totalTime))


parser = argparse.ArgumentParser(description='parses logs')
parser.add_argument("--filename")
parser.add_argument("--interval", default=100000)
parser.add_argument("--minTxnCount", default=0)
parser.add_argument("--account_tx", default=False)

args = parser.parse_args()


def run(args):
    if args.account_tx:
        parseAccountTx(args.filename)
    else:
        parseLogs(args.filename, int(args.interval))


run(args)
@@ -1,65 +0,0 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDFACTORY_H_INCLUDED

#include <boost/algorithm/string.hpp>
#include <backend/BackendInterface.h>
#include <backend/CassandraBackend.h>
#include <backend/PostgresBackend.h>

namespace Backend {
std::shared_ptr<BackendInterface>
make_Backend(boost::asio::io_context& ioc, boost::json::object const& config)
{
    BOOST_LOG_TRIVIAL(info) << __func__ << ": Constructing BackendInterface";

    boost::json::object dbConfig = config.at("database").as_object();

    bool readOnly = false;
    if (config.contains("read_only"))
        readOnly = config.at("read_only").as_bool();

    auto type = dbConfig.at("type").as_string();

    std::shared_ptr<BackendInterface> backend = nullptr;

    if (boost::iequals(type, "cassandra"))
    {
        if (config.contains("online_delete"))
            dbConfig.at(type).as_object()["ttl"] =
                config.at("online_delete").as_int64() * 4;
        backend = std::make_shared<CassandraBackend>(
            ioc, dbConfig.at(type).as_object());
    }
    else if (boost::iequals(type, "postgres"))
    {
        if (dbConfig.contains("experimental") &&
            dbConfig.at("experimental").is_bool() &&
            dbConfig.at("experimental").as_bool())
            backend = std::make_shared<PostgresBackend>(
                ioc, dbConfig.at(type).as_object());
        else
            BOOST_LOG_TRIVIAL(fatal)
                << "Postgres support is experimental at this time. "
                << "If you would really like to use Postgres, add "
                   "\"experimental\":true to your database config";
    }

    if (!backend)
        throw std::runtime_error("Invalid database type");

    backend->open(readOnly);
    auto rng = backend->hardFetchLedgerRangeNoThrow();
    if (rng)
    {
        backend->updateRange(rng->minSequence);
        backend->updateRange(rng->maxSequence);
    }

    BOOST_LOG_TRIVIAL(info)
        << __func__ << ": Constructed BackendInterface Successfully";

    return backend;
}
}  // namespace Backend

#endif  // RIPPLE_REPORTING_BACKEND_FACTORY
@@ -1,328 +0,0 @@
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <backend/BackendInterface.h>
namespace Backend {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
    auto commitRes = doFinishWrites();
    if (commitRes)
    {
        updateRange(ledgerSequence);
    }
    return commitRes;
}
void
BackendInterface::writeLedgerObject(
    std::string&& key,
    std::uint32_t const seq,
    std::string&& blob)
{
    assert(key.size() == sizeof(ripple::uint256));
    ripple::uint256 key256 = ripple::uint256::fromVoid(key.data());
    doWriteLedgerObject(std::move(key), seq, std::move(blob));
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow(
    boost::asio::yield_context& yield) const
{
    BOOST_LOG_TRIVIAL(debug) << __func__;
    while (true)
    {
        try
        {
            return hardFetchLedgerRange(yield);
        }
        catch (DatabaseTimeout& t)
        {
            ;
        }
    }
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
    BOOST_LOG_TRIVIAL(debug) << __func__;
    return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}

// *** state data methods
std::optional<Blob>
BackendInterface::fetchLedgerObject(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    auto obj = cache_.get(key, sequence);
    if (obj)
    {
        BOOST_LOG_TRIVIAL(trace)
            << __func__ << " - cache hit - " << ripple::strHex(key);
        return *obj;
    }
    else
    {
        BOOST_LOG_TRIVIAL(trace)
            << __func__ << " - cache miss - " << ripple::strHex(key);
        auto dbObj = doFetchLedgerObject(key, sequence, yield);
        if (!dbObj)
            BOOST_LOG_TRIVIAL(trace)
                << __func__ << " - missed cache and missed in db";
        else
            BOOST_LOG_TRIVIAL(trace)
                << __func__ << " - missed cache but found in db";
        return dbObj;
    }
}

std::vector<Blob>
BackendInterface::fetchLedgerObjects(
    std::vector<ripple::uint256> const& keys,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    std::vector<Blob> results;
    results.resize(keys.size());
    std::vector<ripple::uint256> misses;
    for (size_t i = 0; i < keys.size(); ++i)
    {
        auto obj = cache_.get(keys[i], sequence);
        if (obj)
            results[i] = *obj;
        else
            misses.push_back(keys[i]);
    }
    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " - cache hits = " << keys.size() - misses.size()
        << " - cache misses = " << misses.size();

    if (misses.size())
    {
        auto objs = doFetchLedgerObjects(misses, sequence, yield);
        for (size_t i = 0, j = 0; i < results.size(); ++i)
        {
            if (results[i].size() == 0)
            {
                results[i] = objs[j];
                ++j;
            }
        }
    }

    return results;
}
// Fetches the successor to key/index
std::optional<ripple::uint256>
BackendInterface::fetchSuccessorKey(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    auto succ = cache_.getSuccessor(key, ledgerSequence);
    if (succ)
        BOOST_LOG_TRIVIAL(trace)
            << __func__ << " - cache hit - " << ripple::strHex(key);
    else
        BOOST_LOG_TRIVIAL(trace)
            << __func__ << " - cache miss - " << ripple::strHex(key);
    return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}

std::optional<LedgerObject>
BackendInterface::fetchSuccessorObject(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
    if (succ)
    {
        auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
        if (!obj)
            return {{*succ, {}}};

        return {{*succ, *obj}};
    }
    return {};
}

BookOffersPage
BackendInterface::fetchBookOffers(
    ripple::uint256 const& book,
    std::uint32_t const ledgerSequence,
    std::uint32_t const limit,
    std::optional<ripple::uint256> const& cursor,
    boost::asio::yield_context& yield) const
{
    // TODO try to speed this up. This can take a few seconds. The goal is
    // to get it down to a few hundred milliseconds.
    BookOffersPage page;
    const ripple::uint256 bookEnd = ripple::getQualityNext(book);
    ripple::uint256 uTipIndex = book;
    std::vector<ripple::uint256> keys;
    auto getMillis = [](auto diff) {
        return std::chrono::duration_cast<std::chrono::milliseconds>(diff)
            .count();
    };
    auto begin = std::chrono::system_clock::now();
    std::uint32_t numSucc = 0;
    std::uint32_t numPages = 0;
    long succMillis = 0;
    long pageMillis = 0;
    while (keys.size() < limit)
    {
        auto mid1 = std::chrono::system_clock::now();
        auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield);
        auto mid2 = std::chrono::system_clock::now();
        numSucc++;
        succMillis += getMillis(mid2 - mid1);
        if (!offerDir || offerDir->key >= bookEnd)
        {
            BOOST_LOG_TRIVIAL(debug) << __func__ << " - offerDir.has_value() "
                                     << offerDir.has_value() << " breaking";
            break;
        }
        uTipIndex = offerDir->key;
        while (keys.size() < limit)
        {
            ++numPages;
            ripple::STLedgerEntry sle{
                ripple::SerialIter{
                    offerDir->blob.data(), offerDir->blob.size()},
                offerDir->key};
            auto indexes = sle.getFieldV256(ripple::sfIndexes);
            keys.insert(keys.end(), indexes.begin(), indexes.end());
            auto next = sle.getFieldU64(ripple::sfIndexNext);
            if (!next)
            {
                BOOST_LOG_TRIVIAL(debug)
                    << __func__ << " next is empty. breaking";
                break;
            }
            auto nextKey = ripple::keylet::page(uTipIndex, next);
            auto nextDir =
                fetchLedgerObject(nextKey.key, ledgerSequence, yield);
            assert(nextDir);
            offerDir->blob = *nextDir;
            offerDir->key = nextKey.key;
        }
        auto mid3 = std::chrono::system_clock::now();
        pageMillis += getMillis(mid3 - mid2);
    }
    auto mid = std::chrono::system_clock::now();
    auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
    for (size_t i = 0; i < keys.size() && i < limit; ++i)
    {
        BOOST_LOG_TRIVIAL(debug)
            << __func__ << " key = " << ripple::strHex(keys[i])
            << " blob = " << ripple::strHex(objs[i])
            << " ledgerSequence = " << ledgerSequence;
        assert(objs[i].size());
        page.offers.push_back({keys[i], objs[i]});
    }
    auto end = std::chrono::system_clock::now();
    BOOST_LOG_TRIVIAL(debug)
        << __func__ << " "
        << "Fetching " << std::to_string(keys.size()) << " offers took "
        << std::to_string(getMillis(mid - begin))
        << " milliseconds. Fetching next dir took "
        << std::to_string(succMillis) << " milliseconds. Fetched next dir "
        << std::to_string(numSucc) << " times"
        << " Fetching next page of dir took " << std::to_string(pageMillis)
        << " milliseconds"
        << ". num pages = " << std::to_string(numPages)
        << ". Fetching all objects took "
        << std::to_string(getMillis(end - mid))
        << " milliseconds. total time = "
        << std::to_string(getMillis(end - begin)) << " milliseconds";

    return page;
}

LedgerPage
BackendInterface::fetchLedgerPage(
    std::optional<ripple::uint256> const& cursor,
    std::uint32_t const ledgerSequence,
    std::uint32_t const limit,
    bool outOfOrder,
    boost::asio::yield_context& yield) const
{
    LedgerPage page;

    std::vector<ripple::uint256> keys;
    bool reachedEnd = false;
    while (keys.size() < limit && !reachedEnd)
    {
        ripple::uint256 const& curCursor = keys.size() ? keys.back()
            : cursor                                   ? *cursor
                                                       : firstKey;
        uint32_t seq = outOfOrder ? range->maxSequence : ledgerSequence;
        auto succ = fetchSuccessorKey(curCursor, seq, yield);
        if (!succ)
            reachedEnd = true;
        else
            keys.push_back(std::move(*succ));
    }

    auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
    for (size_t i = 0; i < objects.size(); ++i)
    {
        if (objects[i].size())
            page.objects.push_back({std::move(keys[i]), std::move(objects[i])});
        else if (!outOfOrder)
        {
            BOOST_LOG_TRIVIAL(error)
                << __func__ << " incorrect successor table. key = "
                << ripple::strHex(keys[i]) << " - seq = " << ledgerSequence;
            std::stringstream msg;
            for (size_t j = 0; j < objects.size(); ++j)
            {
                msg << " - " << ripple::strHex(keys[j]);
            }
            BOOST_LOG_TRIVIAL(error) << __func__ << msg.str();
            assert(false);
        }
    }
    if (keys.size() && !reachedEnd)
        page.cursor = keys.back();

    return page;
}

std::optional<ripple::Fees>
BackendInterface::fetchFees(
    std::uint32_t const seq,
    boost::asio::yield_context& yield) const
{
    ripple::Fees fees;

    auto key = ripple::keylet::fees().key;
    auto bytes = fetchLedgerObject(key, seq, yield);

    if (!bytes)
    {
        BOOST_LOG_TRIVIAL(error) << __func__ << " - could not find fees";
        return {};
    }

    ripple::SerialIter it(bytes->data(), bytes->size());
    ripple::SLE sle{it, key};

    if (sle.getFieldIndex(ripple::sfBaseFee) != -1)
        fees.base = sle.getFieldU64(ripple::sfBaseFee);

    if (sle.getFieldIndex(ripple::sfReferenceFeeUnits) != -1)
        fees.units = sle.getFieldU32(ripple::sfReferenceFeeUnits);

    if (sle.getFieldIndex(ripple::sfReserveBase) != -1)
        fees.reserve = sle.getFieldU32(ripple::sfReserveBase);

    if (sle.getFieldIndex(ripple::sfReserveIncrement) != -1)
        fees.increment = sle.getFieldU32(ripple::sfReserveIncrement);

    return fees;
}

}  // namespace Backend
@@ -1,338 +0,0 @@
#ifndef RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
#define RIPPLE_APP_REPORTING_BACKENDINTERFACE_H_INCLUDED
#include <ripple/ledger/ReadView.h>
#include <boost/asio.hpp>
#include <backend/DBHelpers.h>
#include <backend/SimpleCache.h>
#include <backend/Types.h>
#include <thread>
#include <type_traits>
namespace Backend {

class DatabaseTimeout : public std::exception
{
    const char*
    what() const throw() override
    {
        return "Database read timed out. Please retry the request";
    }
};

template <class F>
auto
retryOnTimeout(F func, size_t waitMs = 500)
{
    while (true)
    {
        try
        {
            return func();
        }
        catch (DatabaseTimeout& t)
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
            BOOST_LOG_TRIVIAL(error)
                << __func__ << " function timed out. Retrying ... ";
        }
    }
}

template <class F>
auto
synchronous(F&& f)
{
    boost::asio::io_context ctx;
    boost::asio::io_context::strand strand(ctx);
    std::optional<boost::asio::io_context::work> work;

    work.emplace(ctx);

    using R = typename std::result_of<F(boost::asio::yield_context&)>::type;
    if constexpr (!std::is_same<R, void>::value)
    {
        R res;
        boost::asio::spawn(
            strand, [&f, &work, &res](boost::asio::yield_context yield) {
                res = f(yield);
                work.reset();
            });

        ctx.run();
        return res;
    }
    else
    {
        boost::asio::spawn(
            strand, [&f, &work](boost::asio::yield_context yield) {
                f(yield);
                work.reset();
            });

        ctx.run();
    }
}

template <class F>
auto
synchronousAndRetryOnTimeout(F&& f)
{
    return retryOnTimeout([&]() { return synchronous(f); });
}

class BackendInterface
{
protected:
    mutable std::shared_mutex rngMtx_;
    std::optional<LedgerRange> range;
    SimpleCache cache_;

    // mutex used for open() and close()
    mutable std::mutex mutex_;

public:
    BackendInterface(boost::json::object const& config)
    {
    }
    virtual ~BackendInterface()
    {
    }

    // *** public read methods ***
    // All of these read methods can throw DatabaseTimeout. When writing code
    // in an RPC handler, this exception does not need to be caught: when an
    // RPC results in a timeout, an error is returned to the client
public:
    // *** ledger methods
    //

    SimpleCache const&
    cache() const
    {
        return cache_;
    }

    SimpleCache&
    cache()
    {
        return cache_;
    }

    virtual std::optional<ripple::LedgerInfo>
    fetchLedgerBySequence(
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const = 0;

    virtual std::optional<ripple::LedgerInfo>
    fetchLedgerByHash(
        ripple::uint256 const& hash,
        boost::asio::yield_context& yield) const = 0;

    virtual std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context& yield) const = 0;

    std::optional<LedgerRange>
    fetchLedgerRange() const
    {
        std::shared_lock lck(rngMtx_);
        return range;
    }

    void
    updateRange(uint32_t newMax)
    {
        std::unique_lock lck(rngMtx_);
        assert(!range || newMax >= range->maxSequence);
        if (!range)
            range = {newMax, newMax};
        else
            range->maxSequence = newMax;
    }

    std::optional<ripple::Fees>
    fetchFees(std::uint32_t const seq, boost::asio::yield_context& yield) const;

    // *** transaction methods
    virtual std::optional<TransactionAndMetadata>
    fetchTransaction(
        ripple::uint256 const& hash,
        boost::asio::yield_context& yield) const = 0;

    virtual std::vector<TransactionAndMetadata>
    fetchTransactions(
        std::vector<ripple::uint256> const& hashes,
        boost::asio::yield_context& yield) const = 0;

    virtual AccountTransactions
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t const limit,
        bool forward,
        std::optional<AccountTransactionsCursor> const& cursor,
        boost::asio::yield_context& yield) const = 0;

    virtual std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const = 0;

    virtual std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const = 0;

    // *** state data methods
    std::optional<Blob>
    fetchLedgerObject(
        ripple::uint256 const& key,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const;

    std::vector<Blob>
    fetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const;

    virtual std::optional<Blob>
    doFetchLedgerObject(
        ripple::uint256 const& key,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const = 0;

    virtual std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const = 0;

    virtual std::vector<LedgerObject>
    fetchLedgerDiff(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const = 0;

    // Fetches a page of ledger objects, ordered by key/index.
    // Used by ledger_data
    LedgerPage
    fetchLedgerPage(
        std::optional<ripple::uint256> const& cursor,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        bool outOfOrder,
        boost::asio::yield_context& yield) const;

    // Fetches the successor to key/index
    std::optional<LedgerObject>
    fetchSuccessorObject(
        ripple::uint256 key,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const;

    std::optional<ripple::uint256>
    fetchSuccessorKey(
        ripple::uint256 key,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const;
    // Fetches the successor to key/index

    virtual std::optional<ripple::uint256>
    doFetchSuccessorKey(
        ripple::uint256 key,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const = 0;

    BookOffersPage
    fetchBookOffers(
        ripple::uint256 const& book,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        std::optional<ripple::uint256> const& cursor,
        boost::asio::yield_context& yield) const;

    std::optional<LedgerRange>
    hardFetchLedgerRange() const
    {
        return synchronous([&](boost::asio::yield_context yield) {
            return hardFetchLedgerRange(yield);
        });
    }

    virtual std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context& yield) const = 0;

    // Doesn't throw DatabaseTimeout. Should be used with care.
    std::optional<LedgerRange>
    hardFetchLedgerRangeNoThrow() const;
    // Doesn't throw DatabaseTimeout. Should be used with care.
    std::optional<LedgerRange>
    hardFetchLedgerRangeNoThrow(boost::asio::yield_context& yield) const;

    virtual void
    writeLedger(
        ripple::LedgerInfo const& ledgerInfo,
        std::string&& ledgerHeader) = 0;

    virtual void
    writeLedgerObject(
        std::string&& key,
        std::uint32_t const seq,
        std::string&& blob);

    virtual void
    writeTransaction(
        std::string&& hash,
        std::uint32_t const seq,
        std::uint32_t const date,
        std::string&& transaction,
        std::string&& metadata) = 0;

    virtual void
    writeAccountTransactions(std::vector<AccountTransactionsData>&& data) = 0;

    virtual void
    writeSuccessor(
        std::string&& key,
        std::uint32_t const seq,
        std::string&& successor) = 0;

    // Tell the database we are about to begin writing data for a particular
    // ledger.
    virtual void
    startWrites() const = 0;

    // Tell the database we have finished writing all data for a particular
    // ledger
    // TODO change the return value to represent different results. committed,
    // write conflict, errored, successful but not committed
    bool
    finishWrites(std::uint32_t const ledgerSequence);

    virtual bool
    doOnlineDelete(
        std::uint32_t numLedgersToKeep,
        boost::asio::yield_context& yield) const = 0;

    // Open the database. Set up all of the necessary objects and
    // datastructures. After this call completes, the database is ready for
    // use.
    virtual void
    open(bool readOnly) = 0;

    // Close the database, releasing any resources
    virtual void
    close(){};

    // *** private helper methods
private:
    virtual void
    doWriteLedgerObject(
        std::string&& key,
        std::uint32_t const seq,
        std::string&& blob) = 0;

    virtual bool
    doFinishWrites() = 0;
};

}  // namespace Backend
using BackendInterface = Backend::BackendInterface;
#endif
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,130 +0,0 @@
#ifndef CLIO_BACKEND_DBHELPERS_H_INCLUDED
#define CLIO_BACKEND_DBHELPERS_H_INCLUDED

#include <ripple/basics/Log.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STAccount.h>
#include <ripple/protocol/TxMeta.h>
#include <boost/container/flat_set.hpp>
#include <backend/Pg.h>
#include <backend/Types.h>

/// Struct used to keep track of what to write to transactions and
/// account_transactions tables in Postgres
struct AccountTransactionsData
{
    boost::container::flat_set<ripple::AccountID> accounts;
    std::uint32_t ledgerSequence;
    std::uint32_t transactionIndex;
    ripple::uint256 txHash;

    AccountTransactionsData(
        ripple::TxMeta& meta,
        ripple::uint256 const& txHash,
        beast::Journal& j)
        : accounts(meta.getAffectedAccounts())
        , ledgerSequence(meta.getLgrSeq())
        , transactionIndex(meta.getIndex())
        , txHash(txHash)
    {
    }

    AccountTransactionsData() = default;
};

template <class T>
inline bool
isOffer(T const& object)
{
    short offer_bytes = (object[1] << 8) | object[2];
    return offer_bytes == 0x006f;
}

template <class T>
inline bool
isOfferHex(T const& object)
{
    auto blob = ripple::strUnHex(4, object.begin(), object.begin() + 4);
    if (blob)
    {
        short offer_bytes = ((*blob)[1] << 8) | (*blob)[2];
        return offer_bytes == 0x006f;
    }
    return false;
}

template <class T>
inline bool
isDirNode(T const& object)
{
    short spaceKey = (object.data()[1] << 8) | object.data()[2];
    return spaceKey == 0x0064;
}

template <class T, class R>
inline bool
isBookDir(T const& key, R const& object)
{
    if (!isDirNode(object))
        return false;

    ripple::STLedgerEntry const sle{
        ripple::SerialIter{object.data(), object.size()}, key};
    return !sle[~ripple::sfOwner].has_value();
}

template <class T>
inline ripple::uint256
getBook(T const& offer)
{
    ripple::SerialIter it{offer.data(), offer.size()};
    ripple::SLE sle{it, {}};
    ripple::uint256 book = sle.getFieldH256(ripple::sfBookDirectory);
    return book;
}

template <class T>
inline ripple::uint256
getBookBase(T const& key)
{
    assert(key.size() == ripple::uint256::size());
    ripple::uint256 ret;
    for (size_t i = 0; i < 24; ++i)
    {
        ret.data()[i] = key.data()[i];
    }
    return ret;
}

inline ripple::LedgerInfo
deserializeHeader(ripple::Slice data)
{
    ripple::SerialIter sit(data.data(), data.size());

    ripple::LedgerInfo info;

    info.seq = sit.get32();
    info.drops = sit.get64();
    info.parentHash = sit.get256();
    info.txHash = sit.get256();
    info.accountHash = sit.get256();
    info.parentCloseTime =
        ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
    info.closeTime =
        ripple::NetClock::time_point{ripple::NetClock::duration{sit.get32()}};
    info.closeTimeResolution = ripple::NetClock::duration{sit.get8()};
    info.closeFlags = sit.get8();

    info.hash = sit.get256();

    return info;
}

inline std::string
uint256ToString(ripple::uint256 const& uint)
{
    return {reinterpret_cast<const char*>(uint.data()), uint.size()};
}

static constexpr std::uint32_t rippleEpochStart = 946684800;
#endif
@@ -1,110 +0,0 @@
#include <backend/LayeredCache.h>
namespace Backend {

void
LayeredCache::insert(
    ripple::uint256 const& key,
    Blob const& value,
    uint32_t seq)
{
    // take a reference; a copy here would discard the update
    auto& entry = map_[key];
    // stale insert, do nothing
    if (seq <= entry.recent.seq)
        return;
    entry.old = entry.recent;
    entry.recent = {seq, value};
    if (value.empty())
        pendingDeletes_.push_back(key);
    if (!entry.old.blob.empty())
        pendingSweeps_.push_back(key);
}

std::optional<Blob>
LayeredCache::select(CacheEntry const& entry, uint32_t seq) const
{
    if (seq < entry.old.seq)
        return {};
    if (seq < entry.recent.seq && !entry.old.blob.empty())
        return entry.old.blob;
    if (!entry.recent.blob.empty())
        return entry.recent.blob;
    return {};
}
void
LayeredCache::update(std::vector<LedgerObject> const& blobs, uint32_t seq)
{
    std::unique_lock lck{mtx_};
    if (seq > mostRecentSequence_)
        mostRecentSequence_ = seq;
    for (auto const& k : pendingSweeps_)
    {
        // drop the old layer of swept entries (by reference, not by copy)
        auto& e = map_[k];
        e.old = {};
    }
    pendingSweeps_.clear();
    for (auto const& k : pendingDeletes_)
    {
        map_.erase(k);
    }
    pendingDeletes_.clear();
    for (auto const& b : blobs)
    {
        insert(b.key, b.blob, seq);
    }
}
std::optional<LedgerObject>
LayeredCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
    ripple::uint256 curKey = key;
    while (true)
    {
        std::shared_lock lck{mtx_};
        if (seq < mostRecentSequence_ - 1)
            return {};
        auto e = map_.upper_bound(curKey);
        if (e == map_.end())
            return {};
        auto const& entry = e->second;
        auto blob = select(entry, seq);
        if (!blob)
        {
            curKey = e->first;
            continue;
        }
        else
            return {{e->first, *blob}};
    }
}
std::optional<LedgerObject>
LayeredCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
    ripple::uint256 curKey = key;
    std::shared_lock lck{mtx_};
    while (true)
    {
        if (seq < mostRecentSequence_ - 1)
            return {};
        auto e = map_.lower_bound(curKey);
        // guard before stepping back; decrementing begin() is undefined
        if (e == map_.begin())
            return {};
        --e;
        auto const& entry = e->second;
        auto blob = select(entry, seq);
        if (!blob)
        {
            curKey = e->first;
            continue;
        }
        else
            return {{e->first, *blob}};
    }
}
std::optional<Blob>
LayeredCache::get(ripple::uint256 const& key, uint32_t seq) const
{
    std::shared_lock lck{mtx_};
    auto e = map_.find(key);
    if (e == map_.end())
        return {};
    auto const& entry = e->second;
    return select(entry, seq);
}
}  // namespace Backend
@@ -1,73 +0,0 @@
#ifndef CLIO_LAYEREDCACHE_H_INCLUDED
#define CLIO_LAYEREDCACHE_H_INCLUDED

#include <ripple/basics/base_uint.h>
#include <backend/Types.h>
#include <map>
#include <mutex>
#include <shared_mutex>
#include <utility>
#include <vector>
namespace Backend {
class LayeredCache
{
    struct SeqBlobPair
    {
        uint32_t seq;
        Blob blob;
    };
    struct CacheEntry
    {
        SeqBlobPair recent;
        SeqBlobPair old;
    };

    std::map<ripple::uint256, CacheEntry> map_;
    std::vector<ripple::uint256> pendingDeletes_;
    std::vector<ripple::uint256> pendingSweeps_;
    mutable std::shared_mutex mtx_;
    uint32_t mostRecentSequence_;

    void
    insert(ripple::uint256 const& key, Blob const& value, uint32_t seq);

    /*
    void
    insert(ripple::uint256 const& key, Blob const& value, uint32_t seq)
    {
        map_.emplace(key,{{seq,value,{}});
    }
    void
    update(ripple::uint256 const& key, Blob const& value, uint32_t seq)
    {
        auto& entry = map_.find(key);
        entry.old = entry.recent;
        entry.recent = {seq, value};
        pendingSweeps_.push_back(key);
    }
    void
    erase(ripple::uint256 const& key, uint32_t seq)
    {
        update(key, {}, seq);
        pendingDeletes_.push_back(key);
    }
    */
    std::optional<Blob>
    select(CacheEntry const& entry, uint32_t seq) const;

public:
    void
    update(std::vector<LedgerObject> const& blobs, uint32_t seq);

    std::optional<Blob>
    get(ripple::uint256 const& key, uint32_t seq) const;

    std::optional<LedgerObject>
    getSuccessor(ripple::uint256 const& key, uint32_t seq) const;

    std::optional<LedgerObject>
    getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
};

}  // namespace Backend
#endif
src/backend/Pg.cpp (1789 lines)
File diff suppressed because it is too large

src/backend/Pg.h (571 lines)
@@ -1,571 +0,0 @@
|
||||
#ifndef RIPPLE_CORE_PG_H_INCLUDED
|
||||
#define RIPPLE_CORE_PG_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/StringUtilities.h>
|
||||
#include <ripple/basics/chrono.h>
|
||||
#include <ripple/ledger/ReadView.h>
|
||||
#include <boost/asio/io_context.hpp>
|
||||
#include <boost/asio/ip/tcp.hpp>
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/icl/closed_interval.hpp>
|
||||
#include <boost/json.hpp>
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include <boost/log/trivial.hpp>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <condition_variable>
|
||||
#include <functional>
|
||||
#include <libpq-fe.h>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
// These postgres structs must be freed only by the postgres API.
|
||||
using pg_result_type = std::unique_ptr<PGresult, void (*)(PGresult*)>;
|
||||
using pg_connection_type = std::unique_ptr<PGconn, void (*)(PGconn*)>;
|
||||
using asio_socket_type = std::unique_ptr<
|
||||
boost::asio::ip::tcp::socket,
|
||||
void (*)(boost::asio::ip::tcp::socket*)>;
|
||||
|
||||
/** first: command
|
||||
* second: parameter values
|
||||
*
|
||||
* The 2nd member takes an optional string to
|
||||
* distinguish between NULL parameters and empty strings. An empty
|
||||
* item corresponds to a NULL parameter.
|
||||
*
|
||||
* Postgres reads each parameter as a c-string, regardless of actual type.
|
||||
* Binary types (bytea) need to be converted to hex and prepended with
|
||||
* \x ("\\x").
|
||||
*/
|
||||
using pg_params =
|
||||
std::pair<char const*, std::vector<std::optional<std::string>>>;
|
||||
|
||||
/** Parameter values for pg API. */
|
||||
using pg_formatted_params = std::vector<char const*>;
|
||||
|
||||
/** Parameters for managing postgres connections. */
|
||||
struct PgConfig
|
||||
{
|
||||
/** Maximum connections allowed to db. */
|
||||
std::size_t max_connections{1000};
|
||||
/** Close idle connections past this duration. */
|
||||
std::chrono::seconds timeout{600};
|
||||
|
||||
/** Index of DB connection parameter names. */
|
||||
std::vector<char const*> keywordsIdx;
|
||||
/** DB connection parameter names. */
|
||||
std::vector<std::string> keywords;
|
||||
/** Index of DB connection parameter values. */
|
||||
std::vector<char const*> valuesIdx;
|
||||
/** DB connection parameter values. */
|
||||
std::vector<std::string> values;
|
||||
};
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
/** Class that operates on postgres query results.
|
||||
*
|
||||
* The functions that return results do not check first whether the
|
||||
* expected results are actually there. Therefore, the caller first needs
|
||||
* to check whether or not a valid response was returned using the operator
|
||||
* bool() overload. If number of tuples or fields are unknown, then check
|
||||
* those. Each result field should be checked for null before attempting
|
||||
* to return results. Finally, the caller must know the type of the field
|
||||
* before calling the corresponding function to return a field. Postgres
|
||||
* internally stores each result field as null-terminated strings.
|
||||
*/
|
||||
class PgResult
|
||||
{
|
||||
// The result object must be freed using the libpq API PQclear() call.
|
||||
pg_result_type result_{nullptr, [](PGresult* result) { PQclear(result); }};
|
||||
std::optional<std::pair<ExecStatusType, std::string>> error_;
|
||||
|
||||
public:
|
||||
/** Constructor for when the process is stopping.
|
||||
*
|
||||
*/
|
||||
PgResult()
|
||||
{
|
||||
}
|
||||
|
||||
/** Constructor for successful query results.
|
||||
*
|
||||
* @param result Query result.
|
||||
*/
|
||||
explicit PgResult(pg_result_type&& result) : result_(std::move(result))
|
||||
{
|
||||
}
|
||||
|
||||
/** Constructor for failed query results.
|
||||
*
|
||||
* @param result Query result that contains error information.
|
||||
* @param conn Postgres connection that contains error information.
|
||||
*/
|
||||
PgResult(PGresult* result, PGconn* conn)
|
||||
: error_({PQresultStatus(result), PQerrorMessage(conn)})
|
||||
{
|
||||
}
|
||||
|
||||
/** Return field as a null-terminated string pointer.
|
||||
*
|
||||
* Note that this function does not guarantee that the result struct
|
||||
* exists, or that the row and fields exist, or that the field is
|
||||
* not null.
|
||||
*
|
||||
* @param ntuple Row number.
|
||||
* @param nfield Field number.
|
||||
* @return Field contents.
|
||||
*/
|
||||
char const*
|
||||
c_str(int ntuple = 0, int nfield = 0) const
|
||||
{
|
||||
return PQgetvalue(result_.get(), ntuple, nfield);
|
||||
}
|
||||
|
||||
std::vector<unsigned char>
|
||||
    asUnHexedBlob(int ntuple = 0, int nfield = 0) const
    {
        std::string_view view{c_str(ntuple, nfield) + 2};
        auto res = ripple::strUnHex(view.size(), view.cbegin(), view.cend());
        if (res)
            return *res;
        return {};
    }

    ripple::uint256
    asUInt256(int ntuple = 0, int nfield = 0) const
    {
        ripple::uint256 val;
        if (!val.parseHex(c_str(ntuple, nfield) + 2))
            throw std::runtime_error("Pg - failed to parse hex into uint256");
        return val;
    }

    /** Return field as equivalent to Postgres' INT type (32 bit signed).
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null, or that the type is that requested.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    std::int32_t
    asInt(int ntuple = 0, int nfield = 0) const
    {
        return boost::lexical_cast<std::int32_t>(
            PQgetvalue(result_.get(), ntuple, nfield));
    }

    /** Return field as equivalent to Postgres' BIGINT type (64 bit signed).
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist, or that the field is
     * not null, or that the type is that requested.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Field contents.
     */
    std::int64_t
    asBigInt(int ntuple = 0, int nfield = 0) const
    {
        return boost::lexical_cast<std::int64_t>(
            PQgetvalue(result_.get(), ntuple, nfield));
    }

    /** Return whether the field is NULL.
     *
     * Note that this function does not guarantee that the result struct
     * exists, or that the row and fields exist.
     *
     * @param ntuple Row number.
     * @param nfield Field number.
     * @return Whether the field is NULL.
     */
    bool
    isNull(int ntuple = 0, int nfield = 0) const
    {
        return PQgetisnull(result_.get(), ntuple, nfield);
    }

    /** Check whether a valid response occurred.
     *
     * @return Whether or not the query returned a valid response.
     */
    operator bool() const
    {
        return result_ != nullptr;
    }

    /** Message describing the query results, suitable for diagnostics.
     *
     * If there was an error, the postgres error type and message are
     * returned. Otherwise, "ok".
     *
     * @return Query result message.
     */
    std::string
    msg() const;

    /** Get number of rows in result.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Number of result rows.
     */
    int
    ntuples() const
    {
        return PQntuples(result_.get());
    }

    /** Get number of fields in result.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Number of result fields.
     */
    int
    nfields() const
    {
        return PQnfields(result_.get());
    }

    /** Return result status of the command.
     *
     * Note that this function does not guarantee that the result struct
     * exists.
     *
     * @return Query result status.
     */
    ExecStatusType
    status() const
    {
        return PQresultStatus(result_.get());
    }
};

/* Class that contains and operates upon a postgres connection. */
class Pg
{
    friend class PgPool;
    friend class PgQuery;

    PgConfig const& config_;
    boost::asio::io_context::strand strand_;
    bool& stop_;
    std::mutex& mutex_;

    asio_socket_type socket_{nullptr, [](boost::asio::ip::tcp::socket*) {}};

    // The connection object must be freed using the libpq API PQfinish() call.
    pg_connection_type conn_{nullptr, [](PGconn* conn) { PQfinish(conn); }};

    inline asio_socket_type
    getSocket(boost::asio::yield_context& strand);

    inline PgResult
    waitForStatus(boost::asio::yield_context& yield, ExecStatusType expected);

    inline void
    flush(boost::asio::yield_context& yield);

    /** Clear results from the connection.
     *
     * Results from previous commands must be cleared before new commands
     * can be processed. This function should be called on connections
     * that weren't processed completely before being reused, such as
     * when being checked-in.
     *
     * @return Whether or not the connection still exists.
     */
    bool
    clear();

    /** Connect to postgres.
     *
     * Idempotently connects to postgres by first checking whether an
     * existing connection is already present. If a connection is not
     * present, or is in an errored state, reconnects to the database.
     */
    void
    connect(boost::asio::yield_context& yield);

    /** Disconnect from postgres. */
    void
    disconnect()
    {
        conn_.reset();
        socket_.reset();
    }

    /** Execute postgres query.
     *
     * If parameters are included, then the command should contain only a
     * single SQL statement. If no parameters, then multiple SQL statements
     * delimited by semi-colons can be processed. The response is from
     * the last command executed.
     *
     * @param command postgres API command string.
     * @param nParams postgres API number of parameters.
     * @param values postgres API array of parameters.
     * @return Query result object.
     */
    PgResult
    query(
        char const* command,
        std::size_t const nParams,
        char const* const* values,
        boost::asio::yield_context& yield);

    /** Execute postgres query with no parameters.
     *
     * @param command Query string.
     * @return Query result object.
     */
    PgResult
    query(char const* command, boost::asio::yield_context& yield)
    {
        return query(command, 0, nullptr, yield);
    }

    /** Execute postgres query with parameters.
     *
     * @param dbParams Database command and parameter values.
     * @return Query result object.
     */
    PgResult
    query(pg_params const& dbParams, boost::asio::yield_context& yield);

    /** Insert multiple records into a table using Postgres' bulk COPY.
     *
     * Throws upon error.
     *
     * @param table Name of table for import.
     * @param records Records in the COPY IN format.
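     *
     *   Each record is one line of tab-separated values terminated by a
     *   newline, with bytea fields hex-encoded and prefixed with \\x.
     *   For example (illustrative), a record for the objects table:
     *   \\x<key hex> <tab> <ledger seq> <tab> \\x<object hex> <newline>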
     */
    void
    bulkInsert(
        char const* table,
        std::string const& records,
        boost::asio::yield_context& yield);

public:
    /** Constructor for Pg class.
     *
     * @param config Config parameters.
     * @param ctx Boost ASIO io_context.
     * @param stop Reference to connection pool's stop flag.
     * @param mutex Reference to connection pool's mutex.
     */
    Pg(PgConfig const& config,
       boost::asio::io_context& ctx,
       bool& stop,
       std::mutex& mutex)
        : config_(config), strand_(ctx), stop_(stop), mutex_(mutex)
    {
    }
};

//-----------------------------------------------------------------------------

/** Database connection pool.
 *
 * Allows re-use of postgres connections. Postgres connections are created
 * as needed until a configurable limit is reached. After use, each connection
 * is placed in a container ordered by time of use. Each request for
 * a connection grabs the most recently used connection from the container.
 * If none are available, a new connection is used (up to the configured
 * limit). Idle connections are destroyed periodically after a configurable
 * timeout duration.
 *
 * This should be stored as a shared pointer so PgQuery objects can safely
 * outlive it.
 */
class PgPool
{
    friend class PgQuery;

    using clock_type = std::chrono::steady_clock;

    boost::asio::io_context& ioc_;
    PgConfig config_;
    std::mutex mutex_;
    std::condition_variable cond_;
    std::size_t connections_{};
    bool stop_{false};

    /** Idle database connections ordered by timestamp to allow timing out. */
    std::multimap<std::chrono::time_point<clock_type>, std::unique_ptr<Pg>>
        idle_;

    /** Get a postgres connection object.
     *
     * Returns the most recent idle connection in the pool, if available.
     * Otherwise, returns a new connection unless we're at the threshold.
     * If so, waits until a connection becomes available.
     *
     * @return Postgres connection object.
     */
    std::unique_ptr<Pg>
    checkout();

    /** Return a postgres object to the pool for reuse.
     *
     * If the connection is healthy, it is placed in the pool for reuse.
     * After calling this, the caller no longer has a connection unless
     * checkout() is called again.
     *
     * @param pg Pg object.
     */
    void
    checkin(std::unique_ptr<Pg>& pg);

public:
    /** Connection pool constructor.
     *
     * @param ioc Boost ASIO io_context.
     * @param config Postgres config.
     */
    PgPool(boost::asio::io_context& ioc, boost::json::object const& config);

    ~PgPool()
    {
        onStop();
    }

    PgConfig&
    config()
    {
        return config_;
    }

    /** Initiate idle connection timer.
     *
     * The PgPool object needs to be fully constructed to support asynchronous
     * operations.
     */
    void
    setup();

    /** Prepare for process shutdown. (Stoppable) */
    void
    onStop();
};

//-----------------------------------------------------------------------------

/** Class to query postgres.
 *
 * This class should be used by functions outside of this
 * compilation unit for querying postgres. It automatically acquires and
 * relinquishes a database connection to handle each query.
 */
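// Minimal usage sketch (illustrative; assumes an io_context `ioc` and a
// shared PgPool `pool` created via make_PgPool):
//
//   boost::asio::spawn(ioc, [&pool](boost::asio::yield_context yield) {
//       PgQuery query{pool};
//       auto res = query("SELECT 1", yield);
//       if (res && res.status() == PGRES_TUPLES_OK)
//           auto one = res.asInt(0, 0);
//   });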
class PgQuery
{
private:
    std::shared_ptr<PgPool> pool_;
    std::unique_ptr<Pg> pg_;

public:
    PgQuery() = delete;

    PgQuery(std::shared_ptr<PgPool> const& pool)
        : pool_(pool), pg_(pool->checkout())
    {
    }

    ~PgQuery()
    {
        pool_->checkin(pg_);
    }

    // TODO: add sendQuery and getResult for sending the query and getting the
    // result asynchronously. This could be useful for sending a bunch of
    // requests concurrently.

    /** Execute postgres query with parameters.
     *
     * @param dbParams Database command with parameters.
     * @return Result of query, including errors.
     */
    PgResult
    operator()(pg_params const& dbParams, boost::asio::yield_context& yield)
    {
        if (!pg_)  // We're stopping; return an empty result.
            return PgResult();
        return pg_->query(dbParams, yield);
    }

    /** Execute postgres query with only a command statement.
     *
     * @param command Command statement.
     * @return Result of query, including errors.
     */
    PgResult
    operator()(char const* command, boost::asio::yield_context& yield)
    {
        return operator()(pg_params{command, {}}, yield);
    }

    /** Insert multiple records into a table using Postgres' bulk COPY.
     *
     * Throws upon error.
     *
     * @param table Name of table for import.
     * @param records Records in the COPY IN format.
     */
    void
    bulkInsert(
        char const* table,
        std::string const& records,
        boost::asio::yield_context& yield)
    {
        pg_->bulkInsert(table, records, yield);
    }
};

//-----------------------------------------------------------------------------

/** Create Postgres connection pool manager.
 *
 * @param ioc Boost ASIO io_context.
 * @param pgConfig Configuration for Postgres.
 * @return Postgres connection pool manager.
 */
std::shared_ptr<PgPool>
make_PgPool(boost::asio::io_context& ioc, boost::json::object const& pgConfig);

/** Initialize the Postgres schema.
 *
 * This function ensures that the database is running the latest version
 * of the schema.
 *
 * @param pool Postgres connection pool manager.
 */
void
initSchema(std::shared_ptr<PgPool> const& pool);
void
initAccountTx(std::shared_ptr<PgPool> const& pool);

// Load the ledger info for the specified ledger from the database.
// @param whichLedger specifies the ledger to load via ledger sequence, ledger
// hash or std::monostate (which loads the most recent).
// @return The LedgerInfo, if found.
std::optional<ripple::LedgerInfo>
getLedger(
    std::variant<std::monostate, ripple::uint256, std::uint32_t> const&
        whichLedger,
    std::shared_ptr<PgPool>& pgPool);

#endif  // RIPPLE_CORE_PG_H_INCLUDED
@@ -1,860 +0,0 @@
#include <boost/asio.hpp>
#include <boost/format.hpp>
#include <backend/PostgresBackend.h>
#include <thread>
namespace Backend {

// Type aliases for async completion handlers
using completion_token = boost::asio::yield_context;
using function_type = void(boost::system::error_code);
using result_type = boost::asio::async_result<completion_token, function_type>;
using handler_type = typename result_type::completion_handler_type;
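
// Holds the completion handler on the heap so the coroutines spawned in
// fetchTransactions() and doFetchLedgerObjects() can share it and invoke it
// exactly once, when the last coroutine finishes.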
struct HandlerWrapper
{
    handler_type handler;

    HandlerWrapper(handler_type&& handler_) : handler(std::move(handler_))
    {
    }
};

PostgresBackend::PostgresBackend(
    boost::asio::io_context& ioc,
    boost::json::object const& config)
    : BackendInterface(config)
    , pgPool_(make_PgPool(ioc, config))
    , writeConnection_(pgPool_)
{
    if (config.contains("write_interval"))
    {
        writeInterval_ = config.at("write_interval").as_int64();
    }
}

void
PostgresBackend::writeLedger(
    ripple::LedgerInfo const& ledgerInfo,
    std::string&& ledgerHeader)
{
    synchronous([&](boost::asio::yield_context yield) {
        auto cmd = boost::format(
            R"(INSERT INTO ledgers
               VALUES (%u,'\x%s','\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))");

        auto ledgerInsert = boost::str(
            cmd % ledgerInfo.seq % ripple::strHex(ledgerInfo.hash) %
            ripple::strHex(ledgerInfo.parentHash) % ledgerInfo.drops.drops() %
            ledgerInfo.closeTime.time_since_epoch().count() %
            ledgerInfo.parentCloseTime.time_since_epoch().count() %
            ledgerInfo.closeTimeResolution.count() % ledgerInfo.closeFlags %
            ripple::strHex(ledgerInfo.accountHash) %
            ripple::strHex(ledgerInfo.txHash));

        auto res = writeConnection_(ledgerInsert.data(), yield);
        abortWrite_ = !res;
        inProcessLedger = ledgerInfo.seq;
    });
}

void
PostgresBackend::writeAccountTransactions(
    std::vector<AccountTransactionsData>&& data)
{
    if (abortWrite_)
        return;
    PgQuery pg(pgPool_);
    for (auto const& record : data)
    {
        for (auto const& a : record.accounts)
        {
            std::string acct = ripple::strHex(a);
            accountTxBuffer_ << "\\\\x" << acct << '\t'
                             << std::to_string(record.ledgerSequence) << '\t'
                             << std::to_string(record.transactionIndex) << '\t'
                             << "\\\\x" << ripple::strHex(record.txHash)
                             << '\n';
        }
    }
}

void
PostgresBackend::doWriteLedgerObject(
    std::string&& key,
    std::uint32_t const seq,
    std::string&& blob)
{
    synchronous([&](boost::asio::yield_context yield) {
        if (abortWrite_)
            return;
        objectsBuffer_ << "\\\\x" << ripple::strHex(key) << '\t'
                       << std::to_string(seq) << '\t' << "\\\\x"
                       << ripple::strHex(blob) << '\n';
        numRowsInObjectsBuffer_++;
        // If the buffer grows too large, the insert fails (root cause
        // unknown), so we flush every writeInterval_ (default: one million)
        // records.
        if (numRowsInObjectsBuffer_ % writeInterval_ == 0)
        {
            BOOST_LOG_TRIVIAL(info)
                << __func__ << " Flushing large buffer. num objects = "
                << numRowsInObjectsBuffer_;
            writeConnection_.bulkInsert("objects", objectsBuffer_.str(), yield);
            BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
            objectsBuffer_.str("");
        }
    });
}

void
PostgresBackend::writeSuccessor(
    std::string&& key,
    std::uint32_t const seq,
    std::string&& successor)
{
    synchronous([&](boost::asio::yield_context yield) {
        if (range)
        {
            if (successors_.count(key) > 0)
                return;
            successors_.insert(key);
        }
        successorBuffer_ << "\\\\x" << ripple::strHex(key) << '\t'
                         << std::to_string(seq) << '\t' << "\\\\x"
                         << ripple::strHex(successor) << '\n';
        BOOST_LOG_TRIVIAL(trace)
            << __func__ << " " << ripple::strHex(key) << " - "
            << std::to_string(seq);
        numRowsInSuccessorBuffer_++;
        if (numRowsInSuccessorBuffer_ % writeInterval_ == 0)
        {
            BOOST_LOG_TRIVIAL(info)
                << __func__ << " Flushing large buffer. num successors = "
                << numRowsInSuccessorBuffer_;
            writeConnection_.bulkInsert(
                "successor", successorBuffer_.str(), yield);
            BOOST_LOG_TRIVIAL(info) << __func__ << " Flushed large buffer";
            successorBuffer_.str("");
        }
    });
}

void
PostgresBackend::writeTransaction(
    std::string&& hash,
    std::uint32_t const seq,
    std::uint32_t const date,
    std::string&& transaction,
    std::string&& metadata)
{
    if (abortWrite_)
        return;
    transactionsBuffer_ << "\\\\x" << ripple::strHex(hash) << '\t'
                        << std::to_string(seq) << '\t' << std::to_string(date)
                        << '\t' << "\\\\x" << ripple::strHex(transaction)
                        << '\t' << "\\\\x" << ripple::strHex(metadata) << '\n';
}

std::uint32_t
checkResult(PgResult const& res, std::uint32_t const numFieldsExpected)
{
    if (!res)
    {
        auto msg = res.msg();
        BOOST_LOG_TRIVIAL(error) << __func__ << " - " << msg;
        if (msg.find("statement timeout") != std::string::npos)
            throw DatabaseTimeout();
        assert(false);
        throw DatabaseTimeout();
    }
    if (res.status() != PGRES_TUPLES_OK)
    {
        std::stringstream msg;
        msg << " : Postgres response should have been "
               "PGRES_TUPLES_OK but instead was "
            << res.status() << " - msg = " << res.msg();
        BOOST_LOG_TRIVIAL(error) << __func__ << " - " << msg.str();
        assert(false);
        throw DatabaseTimeout();
    }

    BOOST_LOG_TRIVIAL(trace)
        << __func__ << " Postgres result msg : " << res.msg();
    if (res.isNull() || res.ntuples() == 0)
    {
        return 0;
    }
    else if (res.ntuples() > 0)
    {
        if (res.nfields() != numFieldsExpected)
        {
            std::stringstream msg;
            msg << "Wrong number of fields in Postgres "
                   "response. Expected "
                << numFieldsExpected << ", but got " << res.nfields();
            assert(false);
            throw std::runtime_error(msg.str());
        }
    }
    return res.ntuples();
}

ripple::LedgerInfo
parseLedgerInfo(PgResult const& res)
{
    std::int64_t ledgerSeq = res.asBigInt(0, 0);
    ripple::uint256 hash = res.asUInt256(0, 1);
    ripple::uint256 prevHash = res.asUInt256(0, 2);
    std::int64_t totalCoins = res.asBigInt(0, 3);
    std::int64_t closeTime = res.asBigInt(0, 4);
    std::int64_t parentCloseTime = res.asBigInt(0, 5);
    std::int64_t closeTimeRes = res.asBigInt(0, 6);
    std::int64_t closeFlags = res.asBigInt(0, 7);
    ripple::uint256 accountHash = res.asUInt256(0, 8);
    ripple::uint256 txHash = res.asUInt256(0, 9);

    using time_point = ripple::NetClock::time_point;
    using duration = ripple::NetClock::duration;

    ripple::LedgerInfo info;
    info.seq = ledgerSeq;
    info.hash = hash;
    info.parentHash = prevHash;
    info.drops = totalCoins;
    info.closeTime = time_point{duration{closeTime}};
    info.parentCloseTime = time_point{duration{parentCloseTime}};
    info.closeFlags = closeFlags;
    info.closeTimeResolution = duration{closeTimeRes};
    info.accountHash = accountHash;
    info.txHash = txHash;
    info.validated = true;
    return info;
}

std::optional<std::uint32_t>
PostgresBackend::fetchLatestLedgerSequence(
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    auto const query =
        "SELECT ledger_seq FROM ledgers ORDER BY ledger_seq DESC LIMIT 1";

    if (auto res = pgQuery(query, yield); checkResult(res, 1))
        return res.asBigInt(0, 0);

    return {};
}

std::optional<ripple::LedgerInfo>
PostgresBackend::fetchLedgerBySequence(
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT * FROM ledgers WHERE ledger_seq = "
        << std::to_string(sequence);

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 10))
        return parseLedgerInfo(res);

    return {};
}

std::optional<ripple::LedgerInfo>
PostgresBackend::fetchLedgerByHash(
    ripple::uint256 const& hash,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT * FROM ledgers WHERE ledger_hash = \'\\x"
        << ripple::to_string(hash) << "\'";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 10))
        return parseLedgerInfo(res);

    return {};
}

std::optional<LedgerRange>
PostgresBackend::hardFetchLedgerRange(boost::asio::yield_context& yield) const
{
    auto range = PgQuery(pgPool_)("SELECT complete_ledgers()", yield);
    if (!range)
        return {};

    std::string res{range.c_str()};
    BOOST_LOG_TRIVIAL(debug) << "range is = " << res;
    try
    {
        size_t minVal = 0;
        size_t maxVal = 0;
        if (res == "empty" || res == "error" || res.empty())
            return {};
        else if (size_t delim = res.find('-'); delim != std::string::npos)
        {
            minVal = std::stol(res.substr(0, delim));
            maxVal = std::stol(res.substr(delim + 1));
        }
        else
        {
            minVal = maxVal = std::stol(res);
        }
        return LedgerRange{minVal, maxVal};
    }
    catch (std::exception&)
    {
        BOOST_LOG_TRIVIAL(error)
            << __func__ << " : "
            << "Error parsing result of complete_ledgers()";
    }
    return {};
}

std::optional<Blob>
PostgresBackend::doFetchLedgerObject(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT object FROM objects WHERE key = "
        << "\'\\x" << ripple::strHex(key) << "\'"
        << " AND ledger_seq <= " << std::to_string(sequence)
        << " ORDER BY ledger_seq DESC LIMIT 1";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 1))
    {
        auto blob = res.asUnHexedBlob(0, 0);
        if (blob.size())
            return blob;
    }

    return {};
}

// returns a transaction, metadata pair
std::optional<TransactionAndMetadata>
PostgresBackend::fetchTransaction(
    ripple::uint256 const& hash,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT transaction,metadata,ledger_seq,date FROM transactions "
           "WHERE hash = "
        << "\'\\x" << ripple::strHex(hash) << "\'";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 4))
    {
        return {
            {res.asUnHexedBlob(0, 0),
             res.asUnHexedBlob(0, 1),
             res.asBigInt(0, 2),
             res.asBigInt(0, 3)}};
    }

    return {};
}

std::vector<TransactionAndMetadata>
PostgresBackend::fetchAllTransactionsInLedger(
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT transaction, metadata, ledger_seq, date FROM transactions "
           "WHERE "
        << "ledger_seq = " << std::to_string(ledgerSequence);

    auto res = pgQuery(sql.str().data(), yield);
    if (size_t numRows = checkResult(res, 4))
    {
        std::vector<TransactionAndMetadata> txns;
        for (size_t i = 0; i < numRows; ++i)
        {
            txns.push_back(
                {res.asUnHexedBlob(i, 0),
                 res.asUnHexedBlob(i, 1),
                 res.asBigInt(i, 2),
                 res.asBigInt(i, 3)});
        }
        return txns;
    }
    return {};
}

std::vector<ripple::uint256>
PostgresBackend::fetchAllTransactionHashesInLedger(
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT hash FROM transactions WHERE "
        << "ledger_seq = " << std::to_string(ledgerSequence);

    auto res = pgQuery(sql.str().data(), yield);
    if (size_t numRows = checkResult(res, 1))
    {
        std::vector<ripple::uint256> hashes;
        for (size_t i = 0; i < numRows; ++i)
        {
            hashes.push_back(res.asUInt256(i, 0));
        }
        return hashes;
    }

    return {};
}

std::optional<ripple::uint256>
PostgresBackend::doFetchSuccessorKey(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT next FROM successor WHERE key = "
        << "\'\\x" << ripple::strHex(key) << "\'"
        << " AND ledger_seq <= " << std::to_string(ledgerSequence)
        << " ORDER BY ledger_seq DESC LIMIT 1";

    if (auto res = pgQuery(sql.str().data(), yield); checkResult(res, 1))
    {
        auto next = res.asUInt256(0, 0);
        if (next == lastKey)
            return {};
        return next;
    }

    return {};
}

std::vector<TransactionAndMetadata>
PostgresBackend::fetchTransactions(
    std::vector<ripple::uint256> const& hashes,
    boost::asio::yield_context& yield) const
{
    if (!hashes.size())
        return {};

    std::vector<TransactionAndMetadata> results;
    results.resize(hashes.size());

    handler_type handler(std::forward<decltype(yield)>(yield));
    result_type result(handler);

    auto hw = new HandlerWrapper(std::move(handler));

    auto start = std::chrono::system_clock::now();

    std::atomic_uint numRemaining = hashes.size();
    std::atomic_bool errored = false;

    for (size_t i = 0; i < hashes.size(); ++i)
    {
        auto const& hash = hashes[i];
        boost::asio::spawn(
            boost::asio::get_associated_executor(yield),
            [this, &hash, &results, hw, &numRemaining, &errored, i](
                boost::asio::yield_context yield) {
                BOOST_LOG_TRIVIAL(trace) << __func__ << " getting txn = " << i;

                PgQuery pgQuery(pgPool_);

                std::stringstream sql;
                sql << "SELECT transaction,metadata,ledger_seq,date FROM "
                       "transactions "
                       "WHERE HASH = \'\\x"
                    << ripple::strHex(hash) << "\'";

                try
                {
                    if (auto const res = pgQuery(sql.str().data(), yield);
                        checkResult(res, 4))
                    {
                        results[i] = {
                            res.asUnHexedBlob(0, 0),
                            res.asUnHexedBlob(0, 1),
                            res.asBigInt(0, 2),
                            res.asBigInt(0, 3)};
                    }
                }
                catch (DatabaseTimeout const&)
                {
                    errored = true;
                }

                if (--numRemaining == 0)
                {
                    handler_type h(std::move(hw->handler));
                    h(boost::system::error_code{});
                }
            });
    }

    // Yields the worker to the io_context until the handler is called.
    result.get();

    delete hw;

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::milliseconds>(end - start);

    BOOST_LOG_TRIVIAL(info)
        << __func__ << " fetched " << std::to_string(hashes.size())
        << " transactions asynchronously. took "
        << std::to_string(duration.count());
    if (errored)
    {
        BOOST_LOG_TRIVIAL(error) << __func__ << " Database fetch timed out";
        throw DatabaseTimeout();
    }

    return results;
}

std::vector<Blob>
PostgresBackend::doFetchLedgerObjects(
    std::vector<ripple::uint256> const& keys,
    std::uint32_t const sequence,
    boost::asio::yield_context& yield) const
{
    if (!keys.size())
        return {};

    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::vector<Blob> results;
    results.resize(keys.size());

    handler_type handler(std::forward<decltype(yield)>(yield));
    result_type result(handler);

    auto hw = new HandlerWrapper(std::move(handler));

    std::atomic_uint numRemaining = keys.size();
    std::atomic_bool errored = false;
    auto start = std::chrono::system_clock::now();
    for (size_t i = 0; i < keys.size(); ++i)
    {
        auto const& key = keys[i];
        boost::asio::spawn(
            boost::asio::get_associated_executor(yield),
            [this, &key, &results, &numRemaining, &errored, hw, i, sequence](
                boost::asio::yield_context yield) {
                PgQuery pgQuery(pgPool_);

                std::stringstream sql;
                sql << "SELECT object FROM "
                       "objects "
                       "WHERE key = \'\\x"
                    << ripple::strHex(key) << "\'"
                    << " AND ledger_seq <= " << std::to_string(sequence)
                    << " ORDER BY ledger_seq DESC LIMIT 1";

                try
                {
                    if (auto const res = pgQuery(sql.str().data(), yield);
                        checkResult(res, 1))
                        results[i] = res.asUnHexedBlob();
                }
                catch (DatabaseTimeout const&)
                {
                    errored = true;
                }

                if (--numRemaining == 0)
                {
                    handler_type h(std::move(hw->handler));
                    h(boost::system::error_code{});
                }
            });
    }

    // Yields the worker to the io_context until the handler is called.
    result.get();

    delete hw;

    auto end = std::chrono::system_clock::now();
    auto duration =
        std::chrono::duration_cast<std::chrono::milliseconds>(end - start);

    BOOST_LOG_TRIVIAL(info)
        << __func__ << " fetched " << std::to_string(keys.size())
        << " objects asynchronously. ms = " << std::to_string(duration.count());
    if (errored)
    {
        BOOST_LOG_TRIVIAL(error) << __func__ << " Database fetch timed out";
        throw DatabaseTimeout();
    }

    return results;
}

std::vector<LedgerObject>
PostgresBackend::fetchLedgerDiff(
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);

    std::stringstream sql;
    sql << "SELECT key,object FROM objects "
           "WHERE "
        << "ledger_seq = " << std::to_string(ledgerSequence);

    auto res = pgQuery(sql.str().data(), yield);
    if (size_t numRows = checkResult(res, 2))
    {
        std::vector<LedgerObject> objects;
        for (size_t i = 0; i < numRows; ++i)
        {
            objects.push_back({res.asUInt256(i, 0), res.asUnHexedBlob(i, 1)});
        }
        return objects;
    }

    return {};
}

AccountTransactions
PostgresBackend::fetchAccountTransactions(
    ripple::AccountID const& account,
    std::uint32_t const limit,
    bool forward,
    std::optional<AccountTransactionsCursor> const& cursor,
    boost::asio::yield_context& yield) const
{
    PgQuery pgQuery(pgPool_);
    pgQuery(set_timeout, yield);
    pg_params dbParams;

    char const*& command = dbParams.first;
    std::vector<std::optional<std::string>>& values = dbParams.second;
    command =
        "SELECT account_tx($1::bytea, $2::bigint, $3::bool, "
        "$4::bigint, $5::bigint)";
    values.resize(5);
    values[0] = "\\x" + strHex(account);

    values[1] = std::to_string(limit);

    values[2] = std::to_string(forward);

    if (cursor)
    {
        values[3] = std::to_string(cursor->ledgerSequence);
        values[4] = std::to_string(cursor->transactionIndex);
    }
    for (size_t i = 0; i < values.size(); ++i)
    {
        BOOST_LOG_TRIVIAL(debug) << "value " << std::to_string(i) << " = "
                                 << (values[i] ? values[i].value() : "null");
    }

    auto start = std::chrono::system_clock::now();
    auto res = pgQuery(dbParams, yield);
    auto end = std::chrono::system_clock::now();

    auto duration = std::chrono::duration<double>(end - start).count();
    BOOST_LOG_TRIVIAL(info)
        << __func__ << " : executed stored_procedure in "
        << std::to_string(duration)
        << " num records = " << std::to_string(checkResult(res, 1));

    checkResult(res, 1);

    char const* resultStr = res.c_str();
    BOOST_LOG_TRIVIAL(debug) << __func__ << " : "
                             << "postgres result = " << resultStr
                             << " : account = " << strHex(account);

    boost::json::value raw = boost::json::parse(resultStr);
    boost::json::object responseObj = raw.as_object();
    BOOST_LOG_TRIVIAL(debug) << " parsed = " << responseObj;
    if (responseObj.contains("transactions"))
    {
        auto txns = responseObj.at("transactions").as_array();
        std::vector<ripple::uint256> hashes;
        for (auto& hashHex : txns)
        {
            ripple::uint256 hash;
            if (hash.parseHex(hashHex.at("hash").as_string().c_str() + 2))
                hashes.push_back(hash);
        }
        if (responseObj.contains("cursor"))
        {
            return {
                fetchTransactions(hashes, yield),
                {{responseObj.at("cursor").at("ledger_sequence").as_int64(),
                  responseObj.at("cursor")
                      .at("transaction_index")
                      .as_int64()}}};
        }
        return {fetchTransactions(hashes, yield), {}};
    }
    return {{}, {}};
}

void
PostgresBackend::open(bool readOnly)
{
    initSchema(pgPool_);
    initAccountTx(pgPool_);
}

void
PostgresBackend::close()
{
}

void
PostgresBackend::startWrites() const
{
    synchronous([&](boost::asio::yield_context yield) {
        numRowsInObjectsBuffer_ = 0;
        abortWrite_ = false;
        auto res = writeConnection_("BEGIN", yield);
        if (!res || res.status() != PGRES_COMMAND_OK)
        {
            std::stringstream msg;
            msg << "Postgres error creating transaction: " << res.msg();
            throw std::runtime_error(msg.str());
        }
    });
}

bool
PostgresBackend::doFinishWrites()
{
    synchronous([&](boost::asio::yield_context yield) {
        if (!abortWrite_)
        {
            std::string txStr = transactionsBuffer_.str();
            writeConnection_.bulkInsert("transactions", txStr, yield);
            writeConnection_.bulkInsert(
                "account_transactions", accountTxBuffer_.str(), yield);
            std::string objectsStr = objectsBuffer_.str();
            if (objectsStr.size())
                writeConnection_.bulkInsert("objects", objectsStr, yield);
            BOOST_LOG_TRIVIAL(debug)
                << __func__ << " objects size = " << objectsStr.size()
                << " txns size = " << txStr.size();
            std::string successorStr = successorBuffer_.str();
            if (successorStr.size())
                writeConnection_.bulkInsert("successor", successorStr, yield);
            if (!range)
            {
                std::stringstream indexCreate;
                indexCreate
                    << "CREATE INDEX diff ON objects USING hash(ledger_seq) "
                       "WHERE NOT "
                       "ledger_seq = "
                    << std::to_string(inProcessLedger);
                writeConnection_(indexCreate.str().data(), yield);
            }
        }
        auto res = writeConnection_("COMMIT", yield);
        if (!res || res.status() != PGRES_COMMAND_OK)
        {
            std::stringstream msg;
            msg << "Postgres error committing transaction: " << res.msg();
            throw std::runtime_error(msg.str());
        }
        transactionsBuffer_.str("");
        transactionsBuffer_.clear();
        objectsBuffer_.str("");
        objectsBuffer_.clear();
        successorBuffer_.str("");
        successorBuffer_.clear();
        successors_.clear();
        accountTxBuffer_.str("");
        accountTxBuffer_.clear();
        numRowsInObjectsBuffer_ = 0;
    });

    return !abortWrite_;
}

bool
PostgresBackend::doOnlineDelete(
    std::uint32_t const numLedgersToKeep,
    boost::asio::yield_context& yield) const
{
    auto rng = fetchLedgerRange();
    if (!rng)
        return false;
    std::uint32_t minLedger = rng->maxSequence - numLedgersToKeep;
    if (minLedger <= rng->minSequence)
        return false;
    PgQuery pgQuery(pgPool_);
    pgQuery("SET statement_timeout TO 0", yield);
    std::optional<ripple::uint256> cursor;
    while (true)
    {
        auto [objects, curCursor] = retryOnTimeout([&]() {
            return fetchLedgerPage(cursor, minLedger, 256, false, yield);
        });
        BOOST_LOG_TRIVIAL(debug) << __func__ << " fetched a page";
        std::stringstream objectsBuffer;

        for (auto& obj : objects)
        {
            objectsBuffer << "\\\\x" << ripple::strHex(obj.key) << '\t'
                          << std::to_string(minLedger) << '\t' << "\\\\x"
                          << ripple::strHex(obj.blob) << '\n';
        }
        pgQuery.bulkInsert("objects", objectsBuffer.str(), yield);
        cursor = curCursor;
        if (!cursor)
            break;
    }
    BOOST_LOG_TRIVIAL(info) << __func__ << " finished inserting into objects";
    {
        std::stringstream sql;
        sql << "DELETE FROM ledgers WHERE ledger_seq < "
            << std::to_string(minLedger);
        auto res = pgQuery(sql.str().data(), yield);
        if (res.msg() != "ok")
            throw std::runtime_error("Error deleting from ledgers table");
    }
    {
        std::stringstream sql;
        sql << "DELETE FROM keys WHERE ledger_seq < "
            << std::to_string(minLedger);
        auto res = pgQuery(sql.str().data(), yield);
        if (res.msg() != "ok")
            throw std::runtime_error("Error deleting from keys table");
    }
    {
        std::stringstream sql;
        sql << "DELETE FROM books WHERE ledger_seq < "
            << std::to_string(minLedger);
        auto res = pgQuery(sql.str().data(), yield);
        if (res.msg() != "ok")
            throw std::runtime_error("Error deleting from books table");
    }
    return true;
}

} // namespace Backend
@@ -1,145 +0,0 @@
#ifndef RIPPLE_APP_REPORTING_POSTGRESBACKEND_H_INCLUDED
#define RIPPLE_APP_REPORTING_POSTGRESBACKEND_H_INCLUDED
#include <boost/json.hpp>
#include <backend/BackendInterface.h>
#include <sstream>
#include <unordered_set>

namespace Backend {
class PostgresBackend : public BackendInterface
{
private:
    mutable size_t numRowsInObjectsBuffer_ = 0;
    mutable std::stringstream objectsBuffer_;
    mutable size_t numRowsInSuccessorBuffer_ = 0;
    mutable std::stringstream successorBuffer_;
    mutable std::stringstream transactionsBuffer_;
    mutable std::stringstream accountTxBuffer_;
    std::shared_ptr<PgPool> pgPool_;
    mutable PgQuery writeConnection_;
    mutable bool abortWrite_ = false;
    std::uint32_t writeInterval_ = 1000000;
    std::uint32_t inProcessLedger = 0;
    mutable std::unordered_set<std::string> successors_;

    const char* const set_timeout = "SET statement_timeout TO 10000";

public:
    PostgresBackend(
        boost::asio::io_context& ioc,
        boost::json::object const& config);

    std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context& yield) const override;

    std::optional<ripple::LedgerInfo>
    fetchLedgerBySequence(
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const override;

    std::optional<ripple::LedgerInfo>
    fetchLedgerByHash(
        ripple::uint256 const& hash,
        boost::asio::yield_context& yield) const override;

    std::optional<Blob>
    doFetchLedgerObject(
        ripple::uint256 const& key,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const override;

    // returns a transaction, metadata pair
    std::optional<TransactionAndMetadata>
    fetchTransaction(
        ripple::uint256 const& hash,
        boost::asio::yield_context& yield) const override;

    std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::vector<LedgerObject>
    fetchLedgerDiff(
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context& yield) const override;

    std::optional<ripple::uint256>
    doFetchSuccessorKey(
        ripple::uint256 key,
        std::uint32_t const ledgerSequence,
        boost::asio::yield_context& yield) const override;

    std::vector<TransactionAndMetadata>
    fetchTransactions(
        std::vector<ripple::uint256> const& hashes,
        boost::asio::yield_context& yield) const override;

    std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t const sequence,
        boost::asio::yield_context& yield) const override;

    AccountTransactions
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t const limit,
        bool forward,
        std::optional<AccountTransactionsCursor> const& cursor,
        boost::asio::yield_context& yield) const override;

    void
    writeLedger(
        ripple::LedgerInfo const& ledgerInfo,
        std::string&& ledgerHeader) override;

    void
    doWriteLedgerObject(
        std::string&& key,
        std::uint32_t const seq,
        std::string&& blob) override;

    void
    writeSuccessor(
        std::string&& key,
        std::uint32_t const seq,
        std::string&& successor) override;

    void
    writeTransaction(
        std::string&& hash,
        std::uint32_t const seq,
        std::uint32_t const date,
        std::string&& transaction,
        std::string&& metadata) override;

    void
    writeAccountTransactions(
        std::vector<AccountTransactionsData>&& data) override;

    void
    open(bool readOnly) override;

    void
    close() override;

    void
    startWrites() const override;

    bool
    doFinishWrites() override;

    bool
    doOnlineDelete(
        std::uint32_t const numLedgersToKeep,
        boost::asio::yield_context& yield) const override;
};
} // namespace Backend
#endif
@@ -1,174 +0,0 @@
The data model used by clio is different from that used by rippled.
rippled uses what is known as a SHAMap, which is a tree structure, with
actual ledger and transaction data at the leaves of the tree. Looking up a record
is a tree traversal, where the key is used to determine the path to the proper
leaf node. The path from root to leaf is used as a proof-tree on the p2p network,
where nodes can prove that a piece of data is present in a ledger by sending
the path from root to leaf. Other nodes can verify this path and be certain
that the data does actually exist in the ledger in question.

clio instead flattens the data model, so lookups are O(1). This results in time
and space savings. This is possible because clio does not participate in the peer
to peer protocol, and thus does not need to verify any data. clio fully trusts the
rippled nodes that are being used as a data source.

clio uses certain features of database query languages to make this happen. Many
databases provide the necessary features to implement the clio data model. At the
time of writing, the data model is implemented in PostgreSQL and CQL (the query
language used by Apache Cassandra and ScyllaDB).

The examples below are written in a sort of pseudo query language.

## Ledgers

We store ledger headers in a ledgers table. In PostgreSQL, we store
the headers in their deserialized form, so we can look up by sequence or hash.

In Cassandra, we store the headers as blobs. The primary table maps a ledger sequence
to the blob, and a secondary table maps a ledger hash to a ledger sequence.
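
A sketch of those two Cassandra tables (illustrative; the table and column
names here are assumptions, not the actual schema):

```
CREATE TABLE ledgers (
    ledger_sequence int,
    header blob,
    PRIMARY KEY(ledger_sequence))

CREATE TABLE ledger_hashes (
    hash blob,
    ledger_sequence int,
    PRIMARY KEY(hash))
```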

## Transactions
Transactions are stored in a very basic table, with a schema like so:

```
CREATE TABLE transactions (
    hash blob,
    ledger_sequence int,
    transaction blob,
    PRIMARY KEY(hash))
```
The primary key is the hash.

A common query pattern is fetching all transactions in a ledger. In PostgreSQL,
nothing special is needed for this. We just query:
```
SELECT * FROM transactions WHERE ledger_sequence = s;
```
Cassandra doesn't handle queries like this well, since `ledger_sequence` is not
the primary key, so we use a second table that maps a ledger sequence number
to all of the hashes in that ledger:

```
CREATE TABLE transaction_hashes (
    ledger_sequence int,
    hash blob,
    PRIMARY KEY(ledger_sequence, hash))
```
This table uses a compound primary key, so we can have multiple records with
the same ledger sequence but different hashes. Looking up all of the transactions
in a given ledger then requires querying the transaction_hashes table to get the hashes of
all of the transactions in the ledger, and then using those hashes to query the
transactions table. Sometimes we only want the hashes though.
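
The two-step lookup, in the same pseudo query language (illustrative):

```
SELECT hash FROM transaction_hashes WHERE ledger_sequence = s;

SELECT * FROM transactions WHERE hash IN (...hashes from above...);
```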

## Ledger data

Ledger data is more complicated than transaction data. Objects have different versions,
where applying transactions in a particular ledger changes an object with a given
key. A basic example is an account root object: the balance changes with every
transaction sent or received, though the key (object ID) for this object remains the same.

Ledger data then is modeled like so:

```
CREATE TABLE objects (
    id blob,
    ledger_sequence int,
    object blob,
    PRIMARY KEY(id, ledger_sequence))
```

The `objects` table has a compound primary key. This is essential. Looking up
a ledger object as of a given ledger then is just:
```
SELECT object FROM objects WHERE id = ? and ledger_sequence <= ?
ORDER BY ledger_sequence DESC LIMIT 1;
```
This gives us the most recent ledger object written at or before a specified ledger.

When a ledger object is deleted, we write a record where `object` is just an empty blob.

### Next
Generally RPCs that read ledger data will just use the above query pattern. However,
a few RPCs (`book_offers` and `ledger_data`) make use of a certain tree operation
called `successor`, which takes in an object id and ledger sequence, and returns
the id of the successor object in the ledger. This is the object in the ledger with the smallest id
greater than the input id.

This problem is quite difficult for clio's data model, since computing this
generally requires the inner nodes of the tree, which clio doesn't store. A naive
way to do this with PostgreSQL is like so:
```
SELECT * FROM objects WHERE id > ? AND ledger_sequence <= s ORDER BY id ASC, ledger_sequence DESC LIMIT 1;
```
This query is not really possible with Cassandra, unless you use ALLOW FILTERING, which
is an anti-pattern (for good reason!). It would require contacting basically every node
in the entire cluster.

But even with Postgres, this query is not scalable. Why? Consider what the query
is doing at the database level. The database starts at the input id, and begins scanning
the table in ascending order of id. It needs to skip over any records that don't actually
exist in the desired ledger, which are objects that have been deleted, or objects that
were created later. As ledger history grows, this query skips over more and more records,
which results in the query taking longer and longer. The time this query takes grows
unbounded then, as ledger history just keeps growing. With under a million ledgers, this
query is usable, but as we approach 10 million ledgers or more, the query becomes very slow.

To alleviate this issue, the data model uses a checkpointing method. We create a second
table called keys, like so:
```
CREATE TABLE keys (
    ledger_sequence int,
    id blob,
    PRIMARY KEY(ledger_sequence, id)
)
```
However, this table does not have an entry for every ledger sequence. Instead,
this table has an entry for roughly every 1 million ledgers. We call these ledgers
flag ledgers. For each flag ledger, the keys table contains every object id in that
ledger, as well as every object id that existed in any ledger between the last flag
ledger and this one. This is a lot of keys, but not every key that ever existed (which
is what the naive attempt at implementing successor was iterating over). In this manner,
the performance is bounded. If we wanted to increase the performance of the successor operation,
we could increase the frequency of flag ledgers, at the cost of more space. 1 million
was chosen as a reasonable tradeoff that bounds the performance without using too much space,
especially since this is only needed for two RPC calls.
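
As a sketch (illustrative), the successor lookup against the keys table, where
`f` is the most recent flag ledger at or before the target ledger:

```
SELECT id FROM keys WHERE ledger_sequence = f AND id > ?
ORDER BY id ASC LIMIT 1;
```

A candidate id returned this way still has to be checked against the objects
table, since a key present at the flag ledger may have been deleted since then.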

We write to this table every ledger, for each new key. However, we also need to handle
keys that existed in the previous flag ledger. To do that, at each flag ledger, we
iterate through the previous flag ledger, and write any keys that are still present
in the new flag ledger. This is done asynchronously.
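
A sketch of that copy-forward step at a new flag ledger `f` (illustrative; the
real implementation iterates in application code and skips keys that no longer
exist, rather than running a single SQL statement):

```
INSERT INTO keys (ledger_sequence, id)
SELECT f, id FROM keys WHERE ledger_sequence = previous_flag;
```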

## Account Transactions
rippled offers an RPC called `account_tx`. This RPC returns all transactions that
affect a given account, and allows users to page backwards or forwards in time.
Generally, this is modeled with a table like so:
```
CREATE TABLE account_tx (
    account blob,
    ledger_sequence int,
    transaction_index int,
    hash blob,
    PRIMARY KEY(account,ledger_sequence,transaction_index))
```

An example of looking up from this table going backwards in time is:
```
SELECT hash FROM account_tx WHERE account = ?
AND ledger_sequence <= ? and transaction_index <= ?
ORDER BY ledger_sequence DESC, transaction_index DESC;
```

This query returns the hashes, and then we use those hashes to read from the
transactions table.

## Comments
There are various nuances around how these data models are tuned and optimized
for each database implementation. Cassandra and PostgreSQL are very different,
so some slight modifications are needed. However, the general model outlined here
is implemented by both databases, and when adding a new database, this general model
should be followed unless there is a good reason not to. Generally, a new database will be
reasonably similar to either PostgreSQL or Cassandra, so using those as a basis should
be sufficient.

Whatever database is used, clio requires strong consistency and durability. For this
reason, any replication strategy needs to maintain strong consistency.
@@ -1,104 +0,0 @@
#include <backend/SimpleCache.h>
namespace Backend {
uint32_t
SimpleCache::latestLedgerSequence() const
{
    std::shared_lock lck{mtx_};
    return latestSeq_;
}

void
SimpleCache::update(
    std::vector<LedgerObject> const& objs,
    uint32_t seq,
    bool isBackground)
{
    {
        std::unique_lock lck{mtx_};
        if (seq > latestSeq_)
        {
            assert(seq == latestSeq_ + 1 || latestSeq_ == 0);
            latestSeq_ = seq;
        }
        for (auto const& obj : objs)
        {
            if (obj.blob.size())
            {
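                // A background write of old data must not resurrect a key
                // that a newer ledger has already deleted.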
                if (isBackground && deletes_.count(obj.key))
                    continue;
                auto& e = map_[obj.key];
                if (seq > e.seq)
                {
                    e = {seq, obj.blob};
                }
            }
            else
            {
                map_.erase(obj.key);
                if (!full_ && !isBackground)
                    deletes_.insert(obj.key);
            }
        }
    }
}

std::optional<LedgerObject>
SimpleCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const
{
    if (!full_)
        return {};
    std::shared_lock lck{mtx_};
    if (seq != latestSeq_)
        return {};
    auto e = map_.upper_bound(key);
    if (e == map_.end())
        return {};
    return {{e->first, e->second.blob}};
}

std::optional<LedgerObject>
SimpleCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const
{
    if (!full_)
        return {};
    std::shared_lock lck{mtx_};
    if (seq != latestSeq_)
        return {};
    auto e = map_.lower_bound(key);
    if (e == map_.begin())
        return {};
    --e;
    return {{e->first, e->second.blob}};
}

std::optional<Blob>
SimpleCache::get(ripple::uint256 const& key, uint32_t seq) const
{
    std::shared_lock lck{mtx_};
    if (seq > latestSeq_)
        return {};
    auto e = map_.find(key);
    if (e == map_.end())
        return {};
    if (seq < e->second.seq)
        return {};
    return {e->second.blob};
}

void
SimpleCache::setFull()
{
    full_ = true;
    std::unique_lock lck{mtx_};
    deletes_.clear();
}

bool
SimpleCache::isFull() const
{
    return full_;
}

size_t
SimpleCache::size() const
{
    std::shared_lock lck{mtx_};
    return map_.size();
}
} // namespace Backend
@@ -1,63 +0,0 @@
|
||||
#ifndef CLIO_SIMPLECACHE_H_INCLUDED
|
||||
#define CLIO_SIMPLECACHE_H_INCLUDED
|
||||
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/basics/hardened_hash.h>
|
||||
#include <backend/Types.h>
|
||||
#include <map>
|
||||
#include <mutex>
|
||||
#include <shared_mutex>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
namespace Backend {
|
||||
class SimpleCache
|
||||
{
|
||||
struct CacheEntry
|
||||
{
|
||||
uint32_t seq = 0;
|
||||
Blob blob;
|
||||
};
|
||||
std::map<ripple::uint256, CacheEntry> map_;
|
||||
mutable std::shared_mutex mtx_;
|
||||
uint32_t latestSeq_ = 0;
|
||||
std::atomic_bool full_ = false;
|
||||
// temporary set to prevent background thread from writing already deleted
|
||||
// data. not used when cache is full
|
||||
std::unordered_set<ripple::uint256, ripple::hardened_hash<>> deletes_;
|
||||
|
||||
public:
|
||||
// Update the cache with new ledger objects
|
||||
// set isBackground to true when writing old data from a background thread
|
||||
void
|
||||
update(
|
||||
std::vector<LedgerObject> const& blobs,
|
||||
uint32_t seq,
|
||||
bool isBackground = false);
|
||||
|
||||
std::optional<Blob>
|
||||
get(ripple::uint256 const& key, uint32_t seq) const;
|
||||
|
||||
// always returns empty optional if isFull() is false
|
||||
std::optional<LedgerObject>
|
||||
getSuccessor(ripple::uint256 const& key, uint32_t seq) const;
|
||||
|
||||
// always returns empty optional if isFull() is false
|
||||
std::optional<LedgerObject>
|
||||
getPredecessor(ripple::uint256 const& key, uint32_t seq) const;
|
||||
|
||||
void
|
||||
setFull();
|
||||
|
||||
uint32_t
|
||||
latestLedgerSequence() const;
|
||||
|
||||
// whether the cache has all data for the most recent ledger
|
||||
bool
|
||||
isFull() const;
|
||||
|
||||
size_t
|
||||
size() const;
|
||||
};
|
||||
|
||||
} // namespace Backend
|
||||
#endif
|
||||
@@ -1,73 +0,0 @@
#ifndef CLIO_TYPES_H_INCLUDED
#define CLIO_TYPES_H_INCLUDED
#include <ripple/basics/base_uint.h>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

namespace Backend {

// *** return types

using Blob = std::vector<unsigned char>;

struct LedgerObject
{
    ripple::uint256 key;
    Blob blob;
    bool
    operator==(const LedgerObject& other) const
    {
        return key == other.key && blob == other.blob;
    }
};

struct LedgerPage
{
    std::vector<LedgerObject> objects;
    std::optional<ripple::uint256> cursor;
};
struct BookOffersPage
{
    std::vector<LedgerObject> offers;
    std::optional<ripple::uint256> cursor;
};
struct TransactionAndMetadata
{
    Blob transaction;
    Blob metadata;
    std::uint32_t ledgerSequence;
    std::uint32_t date;
    bool
    operator==(const TransactionAndMetadata& other) const
    {
        return transaction == other.transaction && metadata == other.metadata &&
            ledgerSequence == other.ledgerSequence && date == other.date;
    }
};

struct AccountTransactionsCursor
{
    std::uint32_t ledgerSequence;
    std::uint32_t transactionIndex;
};

struct AccountTransactions
{
    std::vector<TransactionAndMetadata> txns;
    std::optional<AccountTransactionsCursor> cursor;
};

struct LedgerRange
{
    std::uint32_t minSequence;
    std::uint32_t maxSequence;
};
constexpr ripple::uint256 firstKey{
    "0000000000000000000000000000000000000000000000000000000000000000"};
constexpr ripple::uint256 lastKey{
    "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"};
constexpr ripple::uint256 hi192{
    "0000000000000000000000000000000000000000000000001111111111111111"};
}  // namespace Backend
#endif
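LedgerPage, BookOffersPage, and AccountTransactions all share the same cursor convention: a populated cursor means "more data, resume here", an empty optional means the scan is complete. A hypothetical pagination loop (fetchPage and handle stand in for whatever produces and consumes a LedgerPage):

std::optional<ripple::uint256> cursor;
do {
    Backend::LedgerPage const page = fetchPage(cursor);  // illustrative producer
    for (auto const& obj : page.objects)
        handle(obj.key, obj.blob);                       // illustrative consumer
    cursor = page.cursor;  // nullopt once [firstKey, lastKey] is exhausted
} while (cursor.has_value());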
240 src/data/BackendCounters.cpp Normal file
@@ -0,0 +1,240 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "data/BackendCounters.hpp"

#include "util/Assert.hpp"
#include "util/prometheus/Label.hpp"
#include "util/prometheus/Prometheus.hpp"

#include <boost/json/object.hpp>

#include <chrono>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace data {

namespace {

std::vector<std::int64_t> const histogramBuckets{1, 2, 5, 10, 20, 50, 100, 200, 500, 700, 1000};

std::int64_t
durationInMillisecondsSince(std::chrono::steady_clock::time_point const startTime)
{
    return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - startTime).count();
}

}  // namespace

using namespace util::prometheus;

BackendCounters::BackendCounters()
    : tooBusyCounter_(PrometheusService::counterInt(
          "backend_too_busy_total_number",
          Labels(),
          "The total number of times the backend was too busy to process a request"
      ))
    , writeSyncCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync"}}),
          "The total number of times the backend had to write synchronously"
      ))
    , writeSyncRetryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({Label{"operation", "write_sync_retry"}}),
          "The total number of times the backend had to retry a synchronous write"
      ))
    , asyncWriteCounters_{"write_async"}
    , asyncReadCounters_{"read_async"}
    , readDurationHistogram_(PrometheusService::histogramInt(
          "backend_duration_milliseconds_histogram",
          Labels({Label{"operation", "read"}}),
          histogramBuckets,
          "The duration of backend read operations including retries"
      ))
    , writeDurationHistogram_(PrometheusService::histogramInt(
          "backend_duration_milliseconds_histogram",
          Labels({Label{"operation", "write"}}),
          histogramBuckets,
          "The duration of backend write operations including retries"
      ))
{
}

BackendCounters::PtrType
BackendCounters::make()
{
    struct EnableMakeShared : public BackendCounters {};
    return std::make_shared<EnableMakeShared>();
}

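make() exists because the constructor is private: std::make_shared cannot call a private constructor directly, so a function-local subclass re-opens access just long enough to allocate. The same idiom in isolation (names are illustrative):

#include <memory>

class Widget {
public:
    static std::shared_ptr<Widget>
    make()
    {
        // The local subclass can reach Widget's private constructor because it
        // is defined inside a member function of Widget; make_shared then
        // allocates the object and its control block in a single allocation.
        struct EnableMakeShared : public Widget {};
        return std::make_shared<EnableMakeShared>();
    }

private:
    Widget() = default;
};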
void
BackendCounters::registerTooBusy()
{
    ++tooBusyCounter_.get();
}

void
BackendCounters::registerWriteSync(std::chrono::steady_clock::time_point const startTime)
{
    ++writeSyncCounter_.get();
    writeDurationHistogram_.get().observe(durationInMillisecondsSince(startTime));
}

void
BackendCounters::registerWriteSyncRetry()
{
    ++writeSyncRetryCounter_.get();
}

void
BackendCounters::registerWriteStarted()
{
    asyncWriteCounters_.registerStarted(1u);
}

void
BackendCounters::registerWriteFinished(std::chrono::steady_clock::time_point const startTime)
{
    asyncWriteCounters_.registerFinished(1u);
    auto const duration = durationInMillisecondsSince(startTime);
    writeDurationHistogram_.get().observe(duration);
}

void
BackendCounters::registerWriteRetry()
{
    asyncWriteCounters_.registerRetry(1u);
}

void
BackendCounters::registerReadStarted(std::uint64_t const count)
{
    asyncReadCounters_.registerStarted(count);
}

void
BackendCounters::registerReadFinished(std::chrono::steady_clock::time_point const startTime, std::uint64_t const count)
{
    asyncReadCounters_.registerFinished(count);
    auto const duration = durationInMillisecondsSince(startTime);
    for (std::uint64_t i = 0; i < count; ++i)
        readDurationHistogram_.get().observe(duration);
}

void
BackendCounters::registerReadRetry(std::uint64_t const count)
{
    asyncReadCounters_.registerRetry(count);
}

void
BackendCounters::registerReadError(std::uint64_t const count)
{
    asyncReadCounters_.registerError(count);
}

boost::json::object
BackendCounters::report() const
{
    boost::json::object result;
    result["too_busy"] = tooBusyCounter_.get().value();
    result["write_sync"] = writeSyncCounter_.get().value();
    result["write_sync_retry"] = writeSyncRetryCounter_.get().value();
    for (auto const& [key, value] : asyncWriteCounters_.report())
        result[key] = value;
    for (auto const& [key, value] : asyncReadCounters_.report())
        result[key] = value;
    return result;
}

BackendCounters::AsyncOperationCounters::AsyncOperationCounters(std::string name)
    : name_(std::move(name))
    , pendingCounter_(PrometheusService::gaugeInt(
          "backend_operations_current_number",
          Labels({{"operation", name_}, {"status", "pending"}}),
          "The current number of pending " + name_ + " operations"
      ))
    , completedCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "completed"}}),
          "The total number of completed " + name_ + " operations"
      ))
    , retryCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "retry"}}),
          "The total number of retried " + name_ + " operations"
      ))
    , errorCounter_(PrometheusService::counterInt(
          "backend_operations_total_number",
          Labels({{"operation", name_}, {"status", "error"}}),
          "The total number of errored " + name_ + " operations"
      ))
{
}

void
BackendCounters::AsyncOperationCounters::registerStarted(std::uint64_t const count)
{
    pendingCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerFinished(std::uint64_t const count)
{
    ASSERT(
        pendingCounter_.get().value() >= static_cast<std::int64_t>(count),
        "Finished operations can't be more than pending"
    );
    pendingCounter_.get() -= count;
    completedCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerRetry(std::uint64_t count)
{
    retryCounter_.get() += count;
}

void
BackendCounters::AsyncOperationCounters::registerError(std::uint64_t count)
{
    ASSERT(
        pendingCounter_.get().value() >= static_cast<std::int64_t>(count), "Error operations can't be more than pending"
    );
    pendingCounter_.get() -= count;
    errorCounter_.get() += count;
}

boost::json::object
BackendCounters::AsyncOperationCounters::report() const
{
    return boost::json::object{
        {name_ + "_pending", pendingCounter_.get().value()},
        {name_ + "_completed", completedCounter_.get().value()},
        {name_ + "_retry", retryCounter_.get().value()},
        {name_ + "_error", errorCounter_.get().value()}
    };
}

}  // namespace data
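report() flattens everything into a single flat JSON object: the three scalar counters plus four keys per async group, each key prefixed with the group's operation name. A hypothetical caller surfacing the stats:

auto const counters = data::BackendCounters::make();
boost::json::object const stats = counters->report();
// Keys, as produced by the code above: "too_busy", "write_sync",
// "write_sync_retry", and "<op>_pending" / "<op>_completed" / "<op>_retry" /
// "<op>_error" for op in {write_async, read_async}.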
164 src/data/BackendCounters.hpp Normal file
@@ -0,0 +1,164 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2023, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "util/prometheus/Counter.hpp"
#include "util/prometheus/Gauge.hpp"
#include "util/prometheus/Histogram.hpp"

#include <boost/json/object.hpp>

#include <chrono>
#include <concepts>
#include <cstdint>
#include <functional>
#include <memory>
#include <string>

namespace data {

/**
 * @brief A concept for a class that can be used to count backend operations.
 */
template <typename T>
concept SomeBackendCounters = requires(T a) {
    typename T::PtrType;
    {
        a.registerTooBusy()
    } -> std::same_as<void>;
    {
        a.registerWriteSync(std::chrono::steady_clock::time_point{})
    } -> std::same_as<void>;
    {
        a.registerWriteSyncRetry()
    } -> std::same_as<void>;
    {
        a.registerWriteStarted()
    } -> std::same_as<void>;
    {
        a.registerWriteFinished(std::chrono::steady_clock::time_point{})
    } -> std::same_as<void>;
    {
        a.registerWriteRetry()
    } -> std::same_as<void>;
    {
        a.registerReadStarted(std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.registerReadFinished(std::chrono::steady_clock::time_point{}, std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.registerReadRetry(std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.registerReadError(std::uint64_t{})
    } -> std::same_as<void>;
    {
        a.report()
    } -> std::same_as<boost::json::object>;
};

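The concept is the compile-time contract that lets templated code (and tests) swap in any counters implementation without inheriting from BackendCounters. A hypothetical sanity check one might keep next to the definitions:

// Verify at compile time that the production type models the concept.
static_assert(data::SomeBackendCounters<data::BackendCounters>);

// A test double only needs to expose the same surface; no base class required.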
/**
 * @brief Holds statistics about the backend.
 *
 * @note This class is thread-safe.
 */
class BackendCounters {
public:
    using PtrType = std::shared_ptr<BackendCounters>;

    static PtrType
    make();

    void
    registerTooBusy();

    void
    registerWriteSync(std::chrono::steady_clock::time_point startTime);

    void
    registerWriteSyncRetry();

    void
    registerWriteStarted();

    void
    registerWriteFinished(std::chrono::steady_clock::time_point startTime);

    void
    registerWriteRetry();

    void
    registerReadStarted(std::uint64_t count = 1u);

    void
    registerReadFinished(std::chrono::steady_clock::time_point startTime, std::uint64_t count = 1u);

    void
    registerReadRetry(std::uint64_t count = 1u);

    void
    registerReadError(std::uint64_t count = 1u);

    boost::json::object
    report() const;

private:
    BackendCounters();

    class AsyncOperationCounters {
    public:
        AsyncOperationCounters(std::string name);

        void
        registerStarted(std::uint64_t count);

        void
        registerFinished(std::uint64_t count);

        void
        registerRetry(std::uint64_t count);

        void
        registerError(std::uint64_t count);

        boost::json::object
        report() const;

    private:
        std::string name_;
        std::reference_wrapper<util::prometheus::GaugeInt> pendingCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> completedCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> retryCounter_;
        std::reference_wrapper<util::prometheus::CounterInt> errorCounter_;
    };

    std::reference_wrapper<util::prometheus::CounterInt> tooBusyCounter_;

    std::reference_wrapper<util::prometheus::CounterInt> writeSyncCounter_;
    std::reference_wrapper<util::prometheus::CounterInt> writeSyncRetryCounter_;

    AsyncOperationCounters asyncWriteCounters_{"write_async"};
    AsyncOperationCounters asyncReadCounters_{"read_async"};

    std::reference_wrapper<util::prometheus::HistogramInt> readDurationHistogram_;
    std::reference_wrapper<util::prometheus::HistogramInt> writeDurationHistogram_;
};

}  // namespace data
70 src/data/BackendFactory.hpp Normal file
@@ -0,0 +1,70 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/BackendInterface.hpp"
#include "data/CassandraBackend.hpp"
#include "data/cassandra/SettingsProvider.hpp"
#include "util/config/Config.hpp"
#include "util/log/Logger.hpp"

#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/predicate.hpp>

#include <memory>
#include <stdexcept>
#include <string>

namespace data {

/**
 * @brief A factory function that creates the backend based on a config.
 *
 * @param config The clio config to use
 * @return A shared_ptr<BackendInterface> with the selected implementation
 */
inline std::shared_ptr<BackendInterface>
make_Backend(util::Config const& config)
{
    static util::Logger const log{"Backend"};
    LOG(log.info()) << "Constructing BackendInterface";

    auto const readOnly = config.valueOr("read_only", false);

    auto const type = config.value<std::string>("database.type");
    std::shared_ptr<BackendInterface> backend = nullptr;

    // TODO: retire `cassandra-new` by next release after 2.0
    if (boost::iequals(type, "cassandra") or boost::iequals(type, "cassandra-new")) {
        auto cfg = config.section("database." + type);
        backend = std::make_shared<data::cassandra::CassandraBackend>(data::cassandra::SettingsProvider{cfg}, readOnly);
    }

    if (!backend)
        throw std::runtime_error("Invalid database type");

    auto const rng = backend->hardFetchLedgerRangeNoThrow();
    if (rng)
        backend->setRange(rng->minSequence, rng->maxSequence);

    LOG(log.info()) << "Constructed BackendInterface Successfully";
    return backend;
}
}  // namespace data
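make_Backend dispatches purely on database.type: only "cassandra" (and, until it is retired, the alias "cassandra-new") selects an implementation, anything else throws, and the freshly constructed backend is primed with whatever ledger range the database already holds. A hypothetical config fragment this factory would accept (field names under "cassandra" are illustrative and depend on SettingsProvider):

// {
//     "read_only": false,
//     "database": {
//         "type": "cassandra",
//         "cassandra": { "contact_points": "127.0.0.1", "keyspace": "clio" }
//     }
// }
auto backend = data::make_Backend(config);  // throws std::runtime_error on an unknown type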
387 src/data/BackendInterface.cpp Normal file
@@ -0,0 +1,387 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include "data/BackendInterface.hpp"

#include "data/Types.hpp"
#include "util/Assert.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/spawn.hpp>
#include <ripple/basics/base_uint.h>
#include <ripple/basics/strHex.h>
#include <ripple/protocol/Fees.h>
#include <ripple/protocol/Indexes.h>
#include <ripple/protocol/SField.h>
#include <ripple/protocol/STLedgerEntry.h>
#include <ripple/protocol/Serializer.h>

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

// local to compilation unit loggers
namespace {
util::Logger gLog{"Backend"};
}  // namespace

namespace data {
bool
BackendInterface::finishWrites(std::uint32_t const ledgerSequence)
{
    LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence;
    auto commitRes = doFinishWrites();
    if (commitRes) {
        LOG(gLog.debug()) << "Successfully committed. Updating range now to " << ledgerSequence;
        updateRange(ledgerSequence);
    }
    return commitRes;
}
void
BackendInterface::writeLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob)
{
    ASSERT(key.size() == sizeof(ripple::uint256), "Key must be 256 bits");
    doWriteLedgerObject(std::move(key), seq, std::move(blob));
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRangeNoThrow() const
{
    return retryOnTimeout([&]() { return hardFetchLedgerRange(); });
}

// *** state data methods
std::optional<Blob>
BackendInterface::fetchLedgerObject(
    ripple::uint256 const& key,
    std::uint32_t const sequence,
    boost::asio::yield_context yield
) const
{
    auto obj = cache_.get(key, sequence);
    if (obj) {
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
        return *obj;
    }

    LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
    auto dbObj = doFetchLedgerObject(key, sequence, yield);
    if (!dbObj) {
        LOG(gLog.trace()) << "Missed cache and missed in db";
    } else {
        LOG(gLog.trace()) << "Missed cache but found in db";
    }
    return dbObj;
}

std::vector<Blob>
BackendInterface::fetchLedgerObjects(
    std::vector<ripple::uint256> const& keys,
    std::uint32_t const sequence,
    boost::asio::yield_context yield
) const
{
    std::vector<Blob> results;
    results.resize(keys.size());
    std::vector<ripple::uint256> misses;
    for (size_t i = 0; i < keys.size(); ++i) {
        auto obj = cache_.get(keys[i], sequence);
        if (obj) {
            results[i] = *obj;
        } else {
            misses.push_back(keys[i]);
        }
    }
    LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size();

    if (!misses.empty()) {
        auto objs = doFetchLedgerObjects(misses, sequence, yield);
        for (size_t i = 0, j = 0; i < results.size(); ++i) {
            if (results[i].empty()) {
                results[i] = objs[j];
                ++j;
            }
        }
    }

    return results;
}

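fetchLedgerObjects keeps results positionally aligned with keys: cache hits are written into their slots first, and the j-indexed pass then fills the still-empty slots with DB results, which the code assumes doFetchLedgerObjects returns in the same order as the misses. The invariant in miniature (standalone, illustrative data):

#include <cassert>
#include <string>
#include <vector>

int
main()
{
    std::vector<std::string> results = {"hitA", "", "hitC", ""};  // "" marks a cache miss
    std::vector<std::string> const fromDb = {"dbB", "dbD"};       // misses, in key order

    for (size_t i = 0, j = 0; i < results.size(); ++i) {
        if (results[i].empty())
            results[i] = fromDb[j++];
    }
    assert(results[1] == "dbB" && results[3] == "dbD");
}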
// Fetches the successor to key/index
std::optional<ripple::uint256>
BackendInterface::fetchSuccessorKey(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context yield
) const
{
    auto succ = cache_.getSuccessor(key, ledgerSequence);
    if (succ) {
        LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key);
    } else {
        LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key);
    }
    return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield);
}

std::optional<LedgerObject>
BackendInterface::fetchSuccessorObject(
    ripple::uint256 key,
    std::uint32_t const ledgerSequence,
    boost::asio::yield_context yield
) const
{
    auto succ = fetchSuccessorKey(key, ledgerSequence, yield);
    if (succ) {
        auto obj = fetchLedgerObject(*succ, ledgerSequence, yield);
        if (!obj)
            return {{*succ, {}}};

        return {{*succ, *obj}};
    }
    return {};
}

BookOffersPage
BackendInterface::fetchBookOffers(
    ripple::uint256 const& book,
    std::uint32_t const ledgerSequence,
    std::uint32_t const limit,
    boost::asio::yield_context yield
) const
{
    // TODO try to speed this up. This can take a few seconds. The goal is
    // to get it down to a few hundred milliseconds.
    BookOffersPage page;
    ripple::uint256 const bookEnd = ripple::getQualityNext(book);
    ripple::uint256 uTipIndex = book;
    std::vector<ripple::uint256> keys;
    auto getMillis = [](auto diff) { return std::chrono::duration_cast<std::chrono::milliseconds>(diff).count(); };
    auto begin = std::chrono::system_clock::now();
    std::uint32_t numSucc = 0;
    std::uint32_t numPages = 0;
    long succMillis = 0;
    long pageMillis = 0;
    while (keys.size() < limit) {
        auto mid1 = std::chrono::system_clock::now();
        auto offerDir = fetchSuccessorObject(uTipIndex, ledgerSequence, yield);
        auto mid2 = std::chrono::system_clock::now();
        numSucc++;
        succMillis += getMillis(mid2 - mid1);
        if (!offerDir || offerDir->key >= bookEnd) {
            LOG(gLog.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking";
            break;
        }
        uTipIndex = offerDir->key;
        while (keys.size() < limit) {
            ++numPages;
            ripple::STLedgerEntry const sle{
                ripple::SerialIter{offerDir->blob.data(), offerDir->blob.size()}, offerDir->key
            };
            auto indexes = sle.getFieldV256(ripple::sfIndexes);
            keys.insert(keys.end(), indexes.begin(), indexes.end());
            auto next = sle.getFieldU64(ripple::sfIndexNext);
            if (next == 0u) {
                LOG(gLog.trace()) << "Next is empty. breaking";
                break;
            }
            auto nextKey = ripple::keylet::page(uTipIndex, next);
            auto nextDir = fetchLedgerObject(nextKey.key, ledgerSequence, yield);
            ASSERT(nextDir.has_value(), "Next dir must exist");
            offerDir->blob = *nextDir;
            offerDir->key = nextKey.key;
        }
        auto mid3 = std::chrono::system_clock::now();
        pageMillis += getMillis(mid3 - mid2);
    }
    auto mid = std::chrono::system_clock::now();
    auto objs = fetchLedgerObjects(keys, ledgerSequence, yield);
    for (size_t i = 0; i < keys.size() && i < limit; ++i) {
        LOG(gLog.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i])
                          << " ledgerSequence = " << ledgerSequence;
        ASSERT(!objs[i].empty(), "Ledger object can't be empty");
        page.offers.push_back({keys[i], objs[i]});
    }
    auto end = std::chrono::system_clock::now();
    LOG(gLog.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took "
                      << std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took "
                      << std::to_string(succMillis) << " milliseconds. Fetched next dir " << std::to_string(numSucc)
                      << " times"
                      << " Fetching next page of dir took " << std::to_string(pageMillis) << " milliseconds"
                      << ". num pages = " << std::to_string(numPages) << ". Fetching all objects took "
                      << std::to_string(getMillis(end - mid))
                      << " milliseconds. total time = " << std::to_string(getMillis(end - begin)) << " milliseconds"
                      << " book = " << ripple::strHex(book);

    return page;
}

std::optional<LedgerRange>
BackendInterface::hardFetchLedgerRange() const
{
    return synchronous([this](auto yield) { return hardFetchLedgerRange(yield); });
}

std::optional<LedgerRange>
BackendInterface::fetchLedgerRange() const
{
    std::shared_lock const lck(rngMtx_);
    return range;
}

void
BackendInterface::updateRange(uint32_t newMax)
{
    std::scoped_lock const lck(rngMtx_);

    ASSERT(
        !range || newMax >= range->maxSequence,
        "Range shouldn't exist yet or newMax should be greater. newMax = {}, range->maxSequence = {}",
        newMax,
        range->maxSequence
    );

    if (!range) {
        range = {newMax, newMax};
    } else {
        range->maxSequence = newMax;
    }
}

void
BackendInterface::setRange(uint32_t min, uint32_t max, bool force)
{
    std::scoped_lock const lck(rngMtx_);

    if (!force) {
        ASSERT(min <= max, "Range min must be less than or equal to max");
        ASSERT(not range.has_value(), "Range was already set");
    }

    range = {min, max};
}

LedgerPage
BackendInterface::fetchLedgerPage(
    std::optional<ripple::uint256> const& cursor,
    std::uint32_t const ledgerSequence,
    std::uint32_t const limit,
    bool outOfOrder,
    boost::asio::yield_context yield
) const
{
    LedgerPage page;

    std::vector<ripple::uint256> keys;
    bool reachedEnd = false;
    while (keys.size() < limit && !reachedEnd) {
        ripple::uint256 const& curCursor = !keys.empty() ? keys.back() : (cursor ? *cursor : firstKey);
        std::uint32_t const seq = outOfOrder ? range->maxSequence : ledgerSequence;
        auto succ = fetchSuccessorKey(curCursor, seq, yield);
        if (!succ) {
            reachedEnd = true;
        } else {
            keys.push_back(*succ);
        }
    }

    auto objects = fetchLedgerObjects(keys, ledgerSequence, yield);
    for (size_t i = 0; i < objects.size(); ++i) {
        if (!objects[i].empty()) {
            page.objects.push_back({keys[i], std::move(objects[i])});
        } else if (!outOfOrder) {
            LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i])
                              << " - seq = " << ledgerSequence;
            std::stringstream msg;
            for (size_t j = 0; j < objects.size(); ++j) {
                msg << " - " << ripple::strHex(keys[j]);
            }
            LOG(gLog.error()) << msg.str();
        }
    }
    if (!keys.empty() && !reachedEnd)
        page.cursor = keys.back();

    return page;
}

std::optional<ripple::Fees>
BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context yield) const
{
    ripple::Fees fees;

    auto key = ripple::keylet::fees().key;
    auto bytes = fetchLedgerObject(key, seq, yield);

    if (!bytes) {
        LOG(gLog.error()) << "Could not find fees";
        return {};
    }

    ripple::SerialIter it(bytes->data(), bytes->size());
    ripple::SLE const sle{it, key};

    // The XRPFees amendment introduced new fields for fee calculations.
    // The new fields are set and the old fields are removed via a `set_fees` tx.
    // Fall back to the old fields if `set_fees` has not yet been used to update the fields.
    auto hasNewFields = false;
    {
        auto const baseFeeXRP = sle.at(~ripple::sfBaseFeeDrops);
        auto const reserveBaseXRP = sle.at(~ripple::sfReserveBaseDrops);
        auto const reserveIncrementXRP = sle.at(~ripple::sfReserveIncrementDrops);

        if (baseFeeXRP)
            fees.base = baseFeeXRP->xrp();

        if (reserveBaseXRP)
            fees.reserve = reserveBaseXRP->xrp();

        if (reserveIncrementXRP)
            fees.increment = reserveIncrementXRP->xrp();

        hasNewFields = baseFeeXRP || reserveBaseXRP || reserveIncrementXRP;
    }

    if (not hasNewFields) {
        // Fall back to the old fields
        auto const baseFee = sle.at(~ripple::sfBaseFee);
        auto const reserveBase = sle.at(~ripple::sfReserveBase);
        auto const reserveIncrement = sle.at(~ripple::sfReserveIncrement);

        if (baseFee)
            fees.base = baseFee.value();

        if (reserveBase)
            fees.reserve = reserveBase.value();

        if (reserveIncrement)
            fees.increment = reserveIncrement.value();
    }

    return fees;
}

}  // namespace data
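fetchLedgerPage strings fetchSuccessorKey calls together and then bulk-loads the blobs, so a caller only has to thread the returned cursor back in. A hypothetical coroutine walking the full state of one ledger this way (256 keys per page is an arbitrary choice):

void
walkLedger(data::BackendInterface const& backend, std::uint32_t seq, boost::asio::yield_context yield)
{
    std::optional<ripple::uint256> cursor;
    do {
        auto page = backend.fetchLedgerPage(cursor, seq, 256, /* outOfOrder = */ false, yield);
        for (auto const& obj : page.objects) {
            // consume obj.key / obj.blob here
        }
        cursor = page.cursor;  // empty once the end of the keyspace is reached
    } while (cursor.has_value());
}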
615 src/data/BackendInterface.hpp Normal file
@@ -0,0 +1,615 @@
//------------------------------------------------------------------------------
/*
    This file is part of clio: https://github.com/XRPLF/clio
    Copyright (c) 2022, the clio developers.

    Permission to use, copy, modify, and distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#pragma once

#include "data/DBHelpers.hpp"
#include "data/LedgerCache.hpp"
#include "data/Types.hpp"
#include "util/log/Logger.hpp"

#include <boost/asio/executor_work_guard.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/json.hpp>
#include <boost/json/object.hpp>
#include <boost/utility/result_of.hpp>
#include <ripple/basics/base_uint.h>
#include <ripple/protocol/AccountID.h>
#include <ripple/protocol/Fees.h>
#include <ripple/protocol/LedgerHeader.h>

#include <chrono>
#include <cstddef>
#include <cstdint>
#include <exception>
#include <optional>
#include <shared_mutex>
#include <string>
#include <thread>
#include <type_traits>
#include <vector>

namespace data {

/**
 * @brief Represents a database timeout error.
 */
class DatabaseTimeout : public std::exception {
public:
    char const*
    what() const throw() override
    {
        return "Database read timed out. Please retry the request";
    }
};

static constexpr std::size_t DEFAULT_WAIT_BETWEEN_RETRY = 500;

/**
 * @brief A helper function that catches DatabaseTimeout exceptions and retries indefinitely.
 *
 * @tparam FnType The type of function object to execute
 * @param func The function object to execute
 * @param waitMs Delay between retry attempts
 * @return auto The same as the return type of func
 */
template <class FnType>
auto
retryOnTimeout(FnType func, size_t waitMs = DEFAULT_WAIT_BETWEEN_RETRY)
{
    static util::Logger const log{"Backend"};

    while (true) {
        try {
            return func();
        } catch (DatabaseTimeout const&) {
            LOG(log.error()) << "Database request timed out. Sleeping and retrying ... ";
            std::this_thread::sleep_for(std::chrono::milliseconds(waitMs));
        }
    }
}

/**
 * @brief Synchronously executes the given function object inside a coroutine.
 *
 * @tparam FnType The type of function object to execute
 * @param func The function object to execute
 * @return auto The same as the return type of func
 */
template <class FnType>
auto
synchronous(FnType&& func)
{
    boost::asio::io_context ctx;

    using R = typename boost::result_of<FnType(boost::asio::yield_context)>::type;
    if constexpr (!std::is_same_v<R, void>) {
        R res;
        boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func, &res](auto yield) {
            res = func(yield);
        });

        ctx.run();
        return res;
    } else {
        boost::asio::spawn(ctx, [_ = boost::asio::make_work_guard(ctx), &func](auto yield) { func(yield); });
        ctx.run();
    }
}

/**
 * @brief Synchronously execute the given function object and retry until no DatabaseTimeout is thrown.
 *
 * @tparam FnType The type of function object to execute
 * @param func The function object to execute
 * @return auto The same as the return type of func
 */
template <class FnType>
auto
synchronousAndRetryOnTimeout(FnType&& func)
{
    return retryOnTimeout([&]() { return synchronous(func); });
}

/**
 * @brief The interface to the database used by Clio.
 */
class BackendInterface {
protected:
    mutable std::shared_mutex rngMtx_;
    std::optional<LedgerRange> range;
    LedgerCache cache_;

public:
    BackendInterface() = default;
    virtual ~BackendInterface() = default;

    // TODO: Remove this hack. Cache should not be exposed through BackendInterface
    /**
     * @return Immutable cache
     */
    LedgerCache const&
    cache() const
    {
        return cache_;
    }

    /**
     * @return Mutable cache
     */
    LedgerCache&
    cache()
    {
        return cache_;
    }

    /**
     * @brief Fetches a specific ledger by sequence number.
     *
     * @param sequence The sequence number to fetch for
     * @param yield The coroutine context
     * @return The ripple::LedgerHeader if found; nullopt otherwise
     */
    virtual std::optional<ripple::LedgerHeader>
    fetchLedgerBySequence(std::uint32_t sequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches a specific ledger by hash.
     *
     * @param hash The hash to fetch for
     * @param yield The coroutine context
     * @return The ripple::LedgerHeader if found; nullopt otherwise
     */
    virtual std::optional<ripple::LedgerHeader>
    fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches the latest ledger sequence.
     *
     * @param yield The coroutine context
     * @return Latest sequence wrapped in an optional if found; nullopt otherwise
     */
    virtual std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetch the current ledger range.
     *
     * @return The current ledger range if populated; nullopt otherwise
     */
    std::optional<LedgerRange>
    fetchLedgerRange() const;

    /**
     * @brief Updates the range of sequences that are stored in the DB.
     *
     * @param newMax The new maximum sequence available
     */
    void
    updateRange(uint32_t newMax);

    /**
     * @brief Sets the range of sequences that are stored in the DB.
     *
     * @param min The new minimum sequence available
     * @param max The new maximum sequence available
     * @param force If set to true, the range will be set even if it's already set
     */
    void
    setRange(uint32_t min, uint32_t max, bool force = false);

    /**
     * @brief Fetch the fees from a specific ledger sequence.
     *
     * @param seq The sequence to fetch for
     * @param yield The coroutine context
     * @return ripple::Fees if fees are found; nullopt otherwise
     */
    std::optional<ripple::Fees>
    fetchFees(std::uint32_t seq, boost::asio::yield_context yield) const;

    /**
     * @brief Fetches a specific transaction.
     *
     * @param hash The hash of the transaction to fetch
     * @param yield The coroutine context
     * @return TransactionAndMetadata if transaction is found; nullopt otherwise
     */
    virtual std::optional<TransactionAndMetadata>
    fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches multiple transactions.
     *
     * @param hashes A vector of hashes to fetch transactions for
     * @param yield The coroutine context
     * @return A vector of TransactionAndMetadata matching the given hashes
     */
    virtual std::vector<TransactionAndMetadata>
    fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches all transactions for a specific account.
     *
     * @param account The account to fetch transactions for
     * @param limit The maximum number of transactions per result page
     * @param forward Whether to fetch the page forwards or backwards from the given cursor
     * @param cursor The cursor to resume fetching from
     * @param yield The coroutine context
     * @return Results and a cursor to resume from
     */
    virtual TransactionsAndCursor
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t limit,
        bool forward,
        std::optional<TransactionsCursor> const& cursor,
        boost::asio::yield_context yield
    ) const = 0;

    /**
     * @brief Fetches all transactions from a specific ledger.
     *
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return Results as a vector of TransactionAndMetadata
     */
    virtual std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches all transaction hashes from a specific ledger.
     *
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return Hashes as ripple::uint256 in a vector
     */
    virtual std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches a specific NFT.
     *
     * @param tokenID The ID of the NFT
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return NFT object on success; nullopt otherwise
     */
    virtual std::optional<NFT>
    fetchNFT(ripple::uint256 const& tokenID, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches all transactions for a specific NFT.
     *
     * @param tokenID The ID of the NFT
     * @param limit The maximum number of transactions per result page
     * @param forward Whether to fetch the page forwards or backwards from the given cursor
     * @param cursorIn The cursor to resume fetching from
     * @param yield The coroutine context
     * @return Results and a cursor to resume from
     */
    virtual TransactionsAndCursor
    fetchNFTTransactions(
        ripple::uint256 const& tokenID,
        std::uint32_t limit,
        bool forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context yield
    ) const = 0;

    /**
     * @brief Fetches all NFTs issued by a given address.
     *
     * @param issuer AccountID of the issuer you wish to query.
     * @param taxon Optional taxon of NFTs by which you wish to filter.
     * @param ledgerSequence The ledger sequence to fetch for.
     * @param limit Paging limit.
     * @param cursorIn Optional cursor to allow us to pick up from where we last left off.
     * @param yield Currently executing coroutine.
     * @return NFTs issued by this account, or by this issuer/taxon combination if a taxon is passed, and an
     * optional marker
     */
    virtual NFTsAndCursor
    fetchNFTsByIssuer(
        ripple::AccountID const& issuer,
        std::optional<std::uint32_t> const& taxon,
        std::uint32_t ledgerSequence,
        std::uint32_t limit,
        std::optional<ripple::uint256> const& cursorIn,
        boost::asio::yield_context yield
    ) const = 0;

    /**
     * @brief Fetches a specific ledger object.
     *
     * Currently the real fetch happens in doFetchLedgerObject and fetchLedgerObject attempts to fetch from Cache first
     * and only calls out to the real DB if a cache miss occurred.
     *
     * @param key The key of the object
     * @param sequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The object as a Blob on success; nullopt otherwise
     */
    std::optional<Blob>
    fetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const;

    /**
     * @brief Fetches all ledger objects by their keys.
     *
     * Currently the real fetch happens in doFetchLedgerObjects and fetchLedgerObjects attempts to fetch from Cache
     * first and only calls out to the real DB for each of the keys that was not found in the cache.
     *
     * @param keys A vector with the keys of the objects to fetch
     * @param sequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return A vector of ledger objects as Blobs
     */
    std::vector<Blob>
    fetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t sequence,
        boost::asio::yield_context yield
    ) const;

    /**
     * @brief The database-specific implementation for fetching a ledger object.
     *
     * @param key The key to fetch for
     * @param sequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The object as a Blob on success; nullopt otherwise
     */
    virtual std::optional<Blob>
    doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t sequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief The database-specific implementation for fetching ledger objects.
     *
     * @param keys The keys to fetch for
     * @param sequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return A vector of Blobs representing each fetched object
     */
    virtual std::vector<Blob>
    doFetchLedgerObjects(
        std::vector<ripple::uint256> const& keys,
        std::uint32_t sequence,
        boost::asio::yield_context yield
    ) const = 0;

    /**
     * @brief Returns the difference between ledgers.
     *
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return A vector of LedgerObject representing the diff
     */
    virtual std::vector<LedgerObject>
    fetchLedgerDiff(std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches a page of ledger objects, ordered by key/index.
     *
     * @param cursor The cursor to resume fetching from
     * @param ledgerSequence The ledger sequence to fetch for
     * @param limit The maximum number of transactions per result page
     * @param outOfOrder If set to true max available sequence is used instead of ledgerSequence
     * @param yield The coroutine context
     * @return The ledger page
     */
    LedgerPage
    fetchLedgerPage(
        std::optional<ripple::uint256> const& cursor,
        std::uint32_t ledgerSequence,
        std::uint32_t limit,
        bool outOfOrder,
        boost::asio::yield_context yield
    ) const;

    /**
     * @brief Fetches the successor object.
     *
     * @param key The key to fetch for
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The successor on success; nullopt otherwise
     */
    std::optional<LedgerObject>
    fetchSuccessorObject(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;

    /**
     * @brief Fetches the successor key.
     *
     * The real fetch happens in doFetchSuccessorKey. This function will attempt to lookup the successor in the cache
     * first and only if it's not found in the cache will it fetch from the actual DB.
     *
     * @param key The key to fetch for
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The successor key on success; nullopt otherwise
     */
    std::optional<ripple::uint256>
    fetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const;

    /**
     * @brief Database-specific implementation of fetching the successor key
     *
     * @param key The key to fetch for
     * @param ledgerSequence The ledger sequence to fetch for
     * @param yield The coroutine context
     * @return The successor on success; nullopt otherwise
     */
    virtual std::optional<ripple::uint256>
    doFetchSuccessorKey(ripple::uint256 key, std::uint32_t ledgerSequence, boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches book offers.
     *
     * @param book The book key, an unsigned 256-bit integer
     * @param ledgerSequence The ledger sequence to fetch for
     * @param limit Paging limit; how many offers are returned per page
     * @param yield The coroutine context
     * @return The book offers page
     */
    BookOffersPage
    fetchBookOffers(
        ripple::uint256 const& book,
        std::uint32_t ledgerSequence,
        std::uint32_t limit,
        boost::asio::yield_context yield
    ) const;

    /**
     * @brief Synchronously fetches the ledger range from DB.
     *
     * This function just wraps hardFetchLedgerRange(boost::asio::yield_context) using synchronous(FnType&&).
     *
     * @return The ledger range if available; nullopt otherwise
     */
    std::optional<LedgerRange>
    hardFetchLedgerRange() const;

    /**
     * @brief Fetches the ledger range from DB.
     *
     * @return The ledger range if available; nullopt otherwise
     */
    virtual std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context yield) const = 0;

    /**
     * @brief Fetches the ledger range from DB retrying until no DatabaseTimeout is thrown.
     *
     * @return The ledger range if available; nullopt otherwise
     */
    std::optional<LedgerRange>
    hardFetchLedgerRangeNoThrow() const;

    /**
     * @brief Writes to a specific ledger.
     *
     * @param ledgerHeader Ledger header.
     * @param blob r-value string serialization of the ledger header.
     */
    virtual void
    writeLedger(ripple::LedgerHeader const& ledgerHeader, std::string&& blob) = 0;

    /**
     * @brief Writes a new ledger object.
     *
     * @param key The key to write the ledger object under
     * @param seq The ledger sequence to write for
     * @param blob The data to write
     */
    virtual void
    writeLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob);

    /**
     * @brief Writes a new transaction.
     *
     * @param hash The hash of the transaction
     * @param seq The ledger sequence to write for
     * @param date The timestamp of the entry
     * @param transaction The transaction data to write
     * @param metadata The metadata to write
     */
    virtual void
    writeTransaction(
        std::string&& hash,
        std::uint32_t seq,
        std::uint32_t date,
        std::string&& transaction,
        std::string&& metadata
    ) = 0;

    /**
     * @brief Writes NFTs to the database.
     *
     * @param data A vector of NFTsData objects representing the NFTs
     */
    virtual void
    writeNFTs(std::vector<NFTsData> const& data) = 0;

    /**
     * @brief Write a new set of account transactions.
     *
     * @param data A vector of AccountTransactionsData objects representing the account transactions
     */
    virtual void
    writeAccountTransactions(std::vector<AccountTransactionsData> data) = 0;

    /**
     * @brief Write NFTs transactions.
     *
     * @param data A vector of NFTTransactionsData objects
     */
    virtual void
    writeNFTTransactions(std::vector<NFTTransactionsData> const& data) = 0;

    /**
     * @brief Write a new successor.
     *
     * @param key Key of the object that the passed successor will be the successor for
     * @param seq The ledger sequence to write for
     * @param successor The successor data to write
     */
    virtual void
    writeSuccessor(std::string&& key, std::uint32_t seq, std::string&& successor) = 0;

    /**
     * @brief Starts a write transaction with the DB. No-op for cassandra.
     *
     * Note: Can potentially be deprecated and removed.
     */
    virtual void
    startWrites() const = 0;

    /**
     * @brief Tells database we finished writing all data for a specific ledger.
     *
     * Uses doFinishWrites to synchronize with the pending writes.
     *
     * @param ledgerSequence The ledger sequence to finish writing for
     * @return true on success; false otherwise
     */
    bool
    finishWrites(std::uint32_t ledgerSequence);

    /**
     * @return true if database is overwhelmed; false otherwise
     */
    virtual bool
    isTooBusy() const = 0;

    /**
     * @return json object containing backend usage statistics
     */
    virtual boost::json::object
    stats() const = 0;

private:
    virtual void
    doWriteLedgerObject(std::string&& key, std::uint32_t seq, std::string&& blob) = 0;

    virtual bool
    doFinishWrites() = 0;
};

}  // namespace data
using BackendInterface = data::BackendInterface;
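The helpers near the top of this header are designed to compose: synchronous() drives a coroutine-taking lambda to completion on a private io_context, retryOnTimeout() turns DatabaseTimeout into a sleep-and-retry loop, and synchronousAndRetryOnTimeout() is their composition (hardFetchLedgerRangeNoThrow() in the .cpp follows exactly that pattern). A hypothetical one-off blocking read built the same way:

// Blocks the calling thread until the read succeeds; retries on timeouts.
auto
fetchHeaderBlocking(data::BackendInterface const& backend, std::uint32_t seq)
{
    return data::synchronousAndRetryOnTimeout([&](boost::asio::yield_context yield) {
        return backend.fetchLedgerBySequence(seq, yield);
    });
}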
899 src/data/CassandraBackend.hpp Normal file
@@ -0,0 +1,899 @@
|
||||
//------------------------------------------------------------------------------
|
||||
/*
|
||||
This file is part of clio: https://github.com/XRPLF/clio
|
||||
Copyright (c) 2023, the clio developers.
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
//==============================================================================
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "data/BackendInterface.hpp"
|
||||
#include "data/DBHelpers.hpp"
|
||||
#include "data/Types.hpp"
|
||||
#include "data/cassandra/Concepts.hpp"
|
||||
#include "data/cassandra/Handle.hpp"
|
||||
#include "data/cassandra/Schema.hpp"
|
||||
#include "data/cassandra/SettingsProvider.hpp"
|
||||
#include "data/cassandra/Types.hpp"
|
||||
#include "data/cassandra/impl/ExecutionStrategy.hpp"
|
||||
#include "util/Assert.hpp"
|
||||
#include "util/LedgerUtils.hpp"
|
||||
#include "util/Profiler.hpp"
|
||||
#include "util/log/Logger.hpp"
|
||||
|
||||
#include <boost/asio/spawn.hpp>
|
||||
#include <boost/json/object.hpp>
|
||||
#include <cassandra.h>
|
||||
#include <ripple/basics/Blob.h>
|
||||
#include <ripple/basics/base_uint.h>
|
||||
#include <ripple/basics/strHex.h>
|
||||
#include <ripple/protocol/AccountID.h>
|
||||
#include <ripple/protocol/LedgerHeader.h>
|
||||
#include <ripple/protocol/nft.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <iterator>
|
||||
#include <limits>
|
||||
#include <optional>
|
||||
#include <stdexcept>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace data::cassandra {
|
||||
|
||||
/**
 * @brief Implements @ref BackendInterface for Cassandra/ScyllaDB.
 *
 * Note: This is a safer and more correct rewrite of the original implementation of the backend.
 *
 * @tparam SettingsProviderType The settings provider type to use
 * @tparam ExecutionStrategyType The execution strategy type to use
 */
template <SomeSettingsProvider SettingsProviderType, SomeExecutionStrategy ExecutionStrategyType>
class BasicCassandraBackend : public BackendInterface {
    util::Logger log_{"Backend"};

    SettingsProviderType settingsProvider_;
    Schema<SettingsProviderType> schema_;
    Handle handle_;

    // has to be mutable because of BackendInterface constness :(
    mutable ExecutionStrategyType executor_;

    std::atomic_uint32_t ledgerSequence_ = 0u;

public:
    /**
     * @brief Create a new cassandra/scylla backend instance.
     *
     * @param settingsProvider The settings provider to use
     * @param readOnly Whether the database should be in readonly mode
     */
    BasicCassandraBackend(SettingsProviderType settingsProvider, bool readOnly)
        : settingsProvider_{std::move(settingsProvider)}
        , schema_{settingsProvider_}
        , handle_{settingsProvider_.getSettings()}
        , executor_{settingsProvider_.getSettings(), handle_}
    {
        if (auto const res = handle_.connect(); not res)
            throw std::runtime_error("Could not connect to Cassandra: " + res.error());

        if (not readOnly) {
            if (auto const res = handle_.execute(schema_.createKeyspace); not res) {
                // On DataStax, keyspace creation can be restricted to the admin
                // interface. That does not mean the keyspace does not already exist, though.
                if (res.error().code() != CASS_ERROR_SERVER_UNAUTHORIZED)
                    throw std::runtime_error("Could not create keyspace: " + res.error());
            }

            if (auto const res = handle_.executeEach(schema_.createSchema); not res)
                throw std::runtime_error("Could not create schema: " + res.error());
        }

        try {
            schema_.prepareStatements(handle_);
        } catch (std::runtime_error const& ex) {
            LOG(log_.error()) << "Failed to prepare the statements: " << ex.what() << "; readOnly: " << readOnly;
            throw;
        }

        LOG(log_.info()) << "Created (revamped) CassandraBackend";
    }
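    // Illustrative construction sketch, not part of this class: assuming a
    // `SettingsProvider` built from clio's Cassandra configuration (the
    // `config` object below is hypothetical), the concrete alias defined at
    // the bottom of this file can be instantiated like so:
    //
    //     auto provider = SettingsProvider{config};
    //     auto backend = std::make_shared<CassandraBackend>(std::move(provider), /* readOnly = */ false);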

    TransactionsAndCursor
    fetchAccountTransactions(
        ripple::AccountID const& account,
        std::uint32_t const limit,
        bool forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context yield
    ) const override
    {
        auto rng = fetchLedgerRange();
        if (!rng)
            return {{}, {}};

        Statement const statement = [this, forward, &account]() {
            if (forward)
                return schema_->selectAccountTxForward.bind(account);

            return schema_->selectAccountTx.bind(account);
        }();

        auto cursor = cursorIn;
        if (cursor) {
            statement.bindAt(1, cursor->asTuple());
            LOG(log_.debug()) << "account = " << ripple::strHex(account) << " tuple = " << cursor->ledgerSequence
                              << cursor->transactionIndex;
        } else {
            auto const seq = forward ? rng->minSequence : rng->maxSequence;
            auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();

            statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
            LOG(log_.debug()) << "account = " << ripple::strHex(account) << " idx = " << seq
                              << " tuple = " << placeHolder;
        }

        // FIXME: Limit is a hack to support uint32_t properly for the time
        // being. Should be removed later and schema updated to use proper
        // types.
        statement.bindAt(2, Limit{limit});
        auto const res = executor_.read(yield, statement);
        auto const& results = res.value();
        if (not results.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> hashes = {};
        auto numRows = results.numRows();
        LOG(log_.info()) << "num_rows = " << numRows;

        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
            hashes.push_back(hash);
            if (--numRows == 0) {
                LOG(log_.debug()) << "Setting cursor";
                cursor = data;

                // forward queries by ledger/tx sequence `>=`
                // so we have to advance the index by one
                if (forward)
                    ++cursor->transactionIndex;
            }
        }

        auto const txns = fetchTransactions(hashes, yield);
        LOG(log_.debug()) << "Txns = " << txns.size();

        if (txns.size() == limit) {
            LOG(log_.debug()) << "Returning cursor";
            return {txns, cursor};
        }

        return {txns, {}};
    }

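    // Illustrative pagination sketch (assumptions: `backend`, `account`, and a
    // coroutine `yield` are available; `process` is a hypothetical consumer).
    // A non-empty returned cursor is fed back in to request the next page:
    //
    //     std::optional<TransactionsCursor> cursor;
    //     do {
    //         auto const page = backend.fetchAccountTransactions(account, 50u, false, cursor, yield);
    //         process(page.txns);
    //         cursor = page.cursor;
    //     } while (cursor);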
    bool
    doFinishWrites() override
    {
        // wait for other threads to finish their writes
        executor_.sync();

        if (!range) {
            executor_.writeSync(schema_->updateLedgerRange, ledgerSequence_, false, ledgerSequence_);
        }

        if (not executeSyncUpdate(schema_->updateLedgerRange.bind(ledgerSequence_, true, ledgerSequence_ - 1))) {
            LOG(log_.warn()) << "Update failed for ledger " << ledgerSequence_;
            return false;
        }

        LOG(log_.info()) << "Committed ledger " << ledgerSequence_;
        return true;
    }

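    // Note on doFinishWrites() above: `updateLedgerRange` is presumably a
    // conditional (lightweight transaction) UPDATE, so the boolean read back
    // in executeSyncUpdate() (defined near the bottom of this class) reflects
    // whether the update was applied; a lost race against another writer is
    // resolved there by re-reading the range and comparing it to ledgerSequence_.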
    void
    writeLedger(ripple::LedgerHeader const& ledgerInfo, std::string&& blob) override
    {
        executor_.write(schema_->insertLedgerHeader, ledgerInfo.seq, std::move(blob));

        executor_.write(schema_->insertLedgerHash, ledgerInfo.hash, ledgerInfo.seq);

        ledgerSequence_ = ledgerInfo.seq;
    }

    std::optional<std::uint32_t>
    fetchLatestLedgerSequence(boost::asio::yield_context yield) const override
    {
        if (auto const res = executor_.read(yield, schema_->selectLatestLedger); res) {
            if (auto const& result = res.value(); result) {
                if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
                    return maybeValue;

                LOG(log_.error()) << "Could not fetch latest ledger - no rows";
                return std::nullopt;
            }

            LOG(log_.error()) << "Could not fetch latest ledger - no result";
        } else {
            LOG(log_.error()) << "Could not fetch latest ledger: " << res.error();
        }

        return std::nullopt;
    }

    std::optional<ripple::LedgerHeader>
    fetchLedgerBySequence(std::uint32_t const sequence, boost::asio::yield_context yield) const override
    {
        auto const res = executor_.read(yield, schema_->selectLedgerBySeq, sequence);
        if (res) {
            if (auto const& result = res.value(); result) {
                if (auto const maybeValue = result.template get<std::vector<unsigned char>>(); maybeValue) {
                    return util::deserializeHeader(ripple::makeSlice(*maybeValue));
                }

                LOG(log_.error()) << "Could not fetch ledger by sequence - no rows";
                return std::nullopt;
            }

            LOG(log_.error()) << "Could not fetch ledger by sequence - no result";
        } else {
            LOG(log_.error()) << "Could not fetch ledger by sequence: " << res.error();
        }

        return std::nullopt;
    }

    std::optional<ripple::LedgerHeader>
    fetchLedgerByHash(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
    {
        if (auto const res = executor_.read(yield, schema_->selectLedgerByHash, hash); res) {
            if (auto const& result = res.value(); result) {
                if (auto const maybeValue = result.template get<uint32_t>(); maybeValue)
                    return fetchLedgerBySequence(*maybeValue, yield);

                LOG(log_.error()) << "Could not fetch ledger by hash - no rows";
                return std::nullopt;
            }

            LOG(log_.error()) << "Could not fetch ledger by hash - no result";
        } else {
            LOG(log_.error()) << "Could not fetch ledger by hash: " << res.error();
        }

        return std::nullopt;
    }

    std::optional<LedgerRange>
    hardFetchLedgerRange(boost::asio::yield_context yield) const override
    {
        auto const res = executor_.read(yield, schema_->selectLedgerRange);
        if (res) {
            auto const& results = res.value();
            if (not results.hasRows()) {
                LOG(log_.debug()) << "Could not fetch ledger range - no rows";
                return std::nullopt;
            }

            // TODO: this is probably a good place to use a user-defined type in
            // cassandra instead of having two rows with a bool flag. Or maybe at
            // least use tuple<int, int>?
            LedgerRange range;
            std::size_t idx = 0;
            for (auto [seq] : extract<uint32_t>(results)) {
                if (idx == 0) {
                    range.maxSequence = range.minSequence = seq;
                } else if (idx == 1) {
                    range.maxSequence = seq;
                }

                ++idx;
            }

            if (range.minSequence > range.maxSequence)
                std::swap(range.minSequence, range.maxSequence);

            LOG(log_.debug()) << "After hardFetchLedgerRange range is " << range.minSequence << ":"
                              << range.maxSequence;
            return range;
        }
        LOG(log_.error()) << "Could not fetch ledger range: " << res.error();

        return std::nullopt;
    }

    std::vector<TransactionAndMetadata>
    fetchAllTransactionsInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
    {
        auto hashes = fetchAllTransactionHashesInLedger(ledgerSequence, yield);
        return fetchTransactions(hashes, yield);
    }

    std::vector<ripple::uint256>
    fetchAllTransactionHashesInLedger(std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
        const override
    {
        auto start = std::chrono::system_clock::now();
        auto const res = executor_.read(yield, schema_->selectAllTransactionHashesInLedger, ledgerSequence);

        if (not res) {
            LOG(log_.error()) << "Could not fetch all transaction hashes: " << res.error();
            return {};
        }

        auto const& result = res.value();
        if (not result.hasRows()) {
            LOG(log_.error()) << "Could not fetch all transaction hashes - no rows; ledger = "
                              << std::to_string(ledgerSequence);
            return {};
        }

        std::vector<ripple::uint256> hashes;
        for (auto [hash] : extract<ripple::uint256>(result))
            hashes.push_back(std::move(hash));

        auto end = std::chrono::system_clock::now();
        LOG(log_.debug()) << "Fetched " << hashes.size() << " transaction hashes from Cassandra in "
                          << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()
                          << " milliseconds";

        return hashes;
    }

    std::optional<NFT>
    fetchNFT(ripple::uint256 const& tokenID, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
        const override
    {
        auto const res = executor_.read(yield, schema_->selectNFT, tokenID, ledgerSequence);
        if (not res)
            return std::nullopt;

        if (auto const maybeRow = res->template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
            auto [seq, owner, isBurned] = *maybeRow;
            auto result = std::make_optional<NFT>(tokenID, seq, owner, isBurned);

            // now fetch URI. Usually we will have the URI even for burned NFTs,
            // but if the first ledger on this clio included NFTokenBurn
            // transactions we will not have the URIs for any of those tokens.
            // In any other case not having the URI indicates something went
            // wrong with our data.
            //
            // TODO - in the future it would be great if any handlers that use
            // this could inject a warning in this case (the case of not having
            // a URI because it was burned in the first ledger) to indicate that
            // even though we are returning a blank URI, the NFT might have had
            // one.
            auto uriRes = executor_.read(yield, schema_->selectNFTURI, tokenID, ledgerSequence);
            if (uriRes) {
                if (auto const maybeUri = uriRes->template get<ripple::Blob>(); maybeUri)
                    result->uri = *maybeUri;
            }

            return result;
        }

        LOG(log_.error()) << "Could not fetch NFT - no rows";
        return std::nullopt;
    }

    TransactionsAndCursor
    fetchNFTTransactions(
        ripple::uint256 const& tokenID,
        std::uint32_t const limit,
        bool const forward,
        std::optional<TransactionsCursor> const& cursorIn,
        boost::asio::yield_context yield
    ) const override
    {
        auto rng = fetchLedgerRange();
        if (!rng)
            return {{}, {}};

        Statement const statement = [this, forward, &tokenID]() {
            if (forward)
                return schema_->selectNFTTxForward.bind(tokenID);

            return schema_->selectNFTTx.bind(tokenID);
        }();

        auto cursor = cursorIn;
        if (cursor) {
            statement.bindAt(1, cursor->asTuple());
            LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " tuple = " << cursor->ledgerSequence
                              << cursor->transactionIndex;
        } else {
            auto const seq = forward ? rng->minSequence : rng->maxSequence;
            auto const placeHolder = forward ? 0u : std::numeric_limits<std::uint32_t>::max();

            statement.bindAt(1, std::make_tuple(placeHolder, placeHolder));
            LOG(log_.debug()) << "token_id = " << ripple::strHex(tokenID) << " idx = " << seq
                              << " tuple = " << placeHolder;
        }

        statement.bindAt(2, Limit{limit});

        auto const res = executor_.read(yield, statement);
        auto const& results = res.value();
        if (not results.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> hashes = {};
        auto numRows = results.numRows();
        LOG(log_.info()) << "num_rows = " << numRows;

        for (auto [hash, data] : extract<ripple::uint256, std::tuple<uint32_t, uint32_t>>(results)) {
            hashes.push_back(hash);
            if (--numRows == 0) {
                LOG(log_.debug()) << "Setting cursor";
                cursor = data;

                // forward queries by ledger/tx sequence `>=`
                // so we have to advance the index by one
                if (forward)
                    ++cursor->transactionIndex;
            }
        }

        auto const txns = fetchTransactions(hashes, yield);
        LOG(log_.debug()) << "NFT Txns = " << txns.size();

        if (txns.size() == limit) {
            LOG(log_.debug()) << "Returning cursor";
            return {txns, cursor};
        }

        return {txns, {}};
    }

    NFTsAndCursor
    fetchNFTsByIssuer(
        ripple::AccountID const& issuer,
        std::optional<std::uint32_t> const& taxon,
        std::uint32_t const ledgerSequence,
        std::uint32_t const limit,
        std::optional<ripple::uint256> const& cursorIn,
        boost::asio::yield_context yield
    ) const override
    {
        NFTsAndCursor ret;

        Statement const idQueryStatement = [&taxon, &issuer, &cursorIn, &limit, this]() {
            if (taxon.has_value()) {
                auto r = schema_->selectNFTIDsByIssuerTaxon.bind(issuer);
                r.bindAt(1, *taxon);
                r.bindAt(2, cursorIn.value_or(ripple::uint256(0)));
                r.bindAt(3, Limit{limit});
                return r;
            }

            auto r = schema_->selectNFTIDsByIssuer.bind(issuer);
            r.bindAt(
                1,
                std::make_tuple(
                    cursorIn.has_value() ? ripple::nft::toUInt32(ripple::nft::getTaxon(*cursorIn)) : 0,
                    cursorIn.value_or(ripple::uint256(0))
                )
            );
            r.bindAt(2, Limit{limit});
            return r;
        }();

        // Query for all the NFTs issued by the account, potentially filtered by the taxon
        auto const res = executor_.read(yield, idQueryStatement);

        auto const& idQueryResults = res.value();
        if (not idQueryResults.hasRows()) {
            LOG(log_.debug()) << "No rows returned";
            return {};
        }

        std::vector<ripple::uint256> nftIDs;
        for (auto const [nftID] : extract<ripple::uint256>(idQueryResults))
            nftIDs.push_back(nftID);

        if (nftIDs.empty())
            return ret;

        if (nftIDs.size() == limit)
            ret.cursor = nftIDs.back();

        std::vector<Statement> selectNFTStatements;
        selectNFTStatements.reserve(nftIDs.size());

        std::transform(
            std::cbegin(nftIDs),
            std::cend(nftIDs),
            std::back_inserter(selectNFTStatements),
            [&](auto const& nftID) { return schema_->selectNFT.bind(nftID, ledgerSequence); }
        );

        auto const nftInfos = executor_.readEach(yield, selectNFTStatements);

        std::vector<Statement> selectNFTURIStatements;
        selectNFTURIStatements.reserve(nftIDs.size());

        std::transform(
            std::cbegin(nftIDs),
            std::cend(nftIDs),
            std::back_inserter(selectNFTURIStatements),
            [&](auto const& nftID) { return schema_->selectNFTURI.bind(nftID, ledgerSequence); }
        );

        auto const nftUris = executor_.readEach(yield, selectNFTURIStatements);

        for (auto i = 0u; i < nftIDs.size(); i++) {
            if (auto const maybeRow = nftInfos[i].template get<uint32_t, ripple::AccountID, bool>(); maybeRow) {
                auto [seq, owner, isBurned] = *maybeRow;
                NFT nft(nftIDs[i], seq, owner, isBurned);
                if (auto const maybeUri = nftUris[i].template get<ripple::Blob>(); maybeUri)
                    nft.uri = *maybeUri;
                ret.nfts.push_back(nft);
            }
        }
        return ret;
    }

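    // Design note on fetchNFTsByIssuer() above: a page is resolved in three
    // stages (one ID query, then one readEach() batch for NFT info and one for
    // URIs), joined by index. This relies on readEach() returning results in
    // the same order as the statements passed in.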
std::optional<Blob>
|
||||
doFetchLedgerObject(ripple::uint256 const& key, std::uint32_t const sequence, boost::asio::yield_context yield)
|
||||
const override
|
||||
{
|
||||
LOG(log_.debug()) << "Fetching ledger object for seq " << sequence << ", key = " << ripple::to_string(key);
|
||||
if (auto const res = executor_.read(yield, schema_->selectObject, key, sequence); res) {
|
||||
if (auto const result = res->template get<Blob>(); result) {
|
||||
if (result->size())
|
||||
return *result;
|
||||
} else {
|
||||
LOG(log_.debug()) << "Could not fetch ledger object - no rows";
|
||||
}
|
||||
} else {
|
||||
LOG(log_.error()) << "Could not fetch ledger object: " << res.error();
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::optional<TransactionAndMetadata>
|
||||
fetchTransaction(ripple::uint256 const& hash, boost::asio::yield_context yield) const override
|
||||
{
|
||||
if (auto const res = executor_.read(yield, schema_->selectTransaction, hash); res) {
|
||||
if (auto const maybeValue = res->template get<Blob, Blob, uint32_t, uint32_t>(); maybeValue) {
|
||||
auto [transaction, meta, seq, date] = *maybeValue;
|
||||
return std::make_optional<TransactionAndMetadata>(transaction, meta, seq, date);
|
||||
}
|
||||
|
||||
LOG(log_.debug()) << "Could not fetch transaction - no rows";
|
||||
} else {
|
||||
LOG(log_.error()) << "Could not fetch transaction: " << res.error();
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
std::optional<ripple::uint256>
|
||||
doFetchSuccessorKey(ripple::uint256 key, std::uint32_t const ledgerSequence, boost::asio::yield_context yield)
|
||||
const override
|
||||
{
|
||||
if (auto const res = executor_.read(yield, schema_->selectSuccessor, key, ledgerSequence); res) {
|
||||
if (auto const result = res->template get<ripple::uint256>(); result) {
|
||||
if (*result == lastKey)
|
||||
return std::nullopt;
|
||||
return *result;
|
||||
}
|
||||
|
||||
LOG(log_.debug()) << "Could not fetch successor - no rows";
|
||||
} else {
|
||||
LOG(log_.error()) << "Could not fetch successor: " << res.error();
|
||||
}
|
||||
|
||||
return std::nullopt;
|
||||
}
|
||||
|
||||
    std::vector<TransactionAndMetadata>
    fetchTransactions(std::vector<ripple::uint256> const& hashes, boost::asio::yield_context yield) const override
    {
        if (hashes.empty())
            return {};

        auto const numHashes = hashes.size();
        std::vector<TransactionAndMetadata> results;
        results.reserve(numHashes);

        std::vector<Statement> statements;
        statements.reserve(numHashes);

        auto const timeDiff = util::timed([this, yield, &results, &hashes, &statements]() {
            // TODO: seems like a job for "hash IN (list of hashes)" instead?
            std::transform(
                std::cbegin(hashes),
                std::cend(hashes),
                std::back_inserter(statements),
                [this](auto const& hash) { return schema_->selectTransaction.bind(hash); }
            );

            auto const entries = executor_.readEach(yield, statements);
            std::transform(
                std::cbegin(entries),
                std::cend(entries),
                std::back_inserter(results),
                [](auto const& res) -> TransactionAndMetadata {
                    if (auto const maybeRow = res.template get<Blob, Blob, uint32_t, uint32_t>(); maybeRow)
                        return *maybeRow;

                    return {};
                }
            );
        });

        ASSERT(numHashes == results.size(), "Number of hashes and results must match");
        LOG(log_.debug()) << "Fetched " << numHashes << " transactions from Cassandra in " << timeDiff
                          << " milliseconds";
        return results;
    }

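    // Design note: fetchTransactions() above (and doFetchLedgerObjects() below)
    // fans out one prepared statement per key via executor_.readEach() rather
    // than issuing a single "IN (...)" query, as the TODOs point out. The shape
    // of that fan-out, with results positionally matching the inputs:
    //
    //     std::vector<Statement> statements;
    //     for (auto const& hash : hashes)
    //         statements.push_back(schema_->selectTransaction.bind(hash));
    //     auto const entries = executor_.readEach(yield, statements);  // entries[i] corresponds to hashes[i]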
std::vector<Blob>
|
||||
doFetchLedgerObjects(
|
||||
std::vector<ripple::uint256> const& keys,
|
||||
std::uint32_t const sequence,
|
||||
boost::asio::yield_context yield
|
||||
) const override
|
||||
{
|
||||
if (keys.empty())
|
||||
return {};
|
||||
|
||||
auto const numKeys = keys.size();
|
||||
LOG(log_.trace()) << "Fetching " << numKeys << " objects";
|
||||
|
||||
std::vector<Blob> results;
|
||||
results.reserve(numKeys);
|
||||
|
||||
std::vector<Statement> statements;
|
||||
statements.reserve(numKeys);
|
||||
|
||||
// TODO: seems like a job for "key IN (list of keys)" instead?
|
||||
std::transform(
|
||||
std::cbegin(keys),
|
||||
std::cend(keys),
|
||||
std::back_inserter(statements),
|
||||
[this, &sequence](auto const& key) { return schema_->selectObject.bind(key, sequence); }
|
||||
);
|
||||
|
||||
auto const entries = executor_.readEach(yield, statements);
|
||||
std::transform(
|
||||
std::cbegin(entries),
|
||||
std::cend(entries),
|
||||
std::back_inserter(results),
|
||||
[](auto const& res) -> Blob {
|
||||
if (auto const maybeValue = res.template get<Blob>(); maybeValue)
|
||||
return *maybeValue;
|
||||
|
||||
return {};
|
||||
}
|
||||
);
|
||||
|
||||
LOG(log_.trace()) << "Fetched " << numKeys << " objects";
|
||||
return results;
|
||||
}
|
||||
|
||||
    std::vector<LedgerObject>
    fetchLedgerDiff(std::uint32_t const ledgerSequence, boost::asio::yield_context yield) const override
    {
        auto const [keys, timeDiff] = util::timed([this, &ledgerSequence, yield]() -> std::vector<ripple::uint256> {
            auto const res = executor_.read(yield, schema_->selectDiff, ledgerSequence);
            if (not res) {
                LOG(log_.error()) << "Could not fetch ledger diff: " << res.error() << "; ledger = " << ledgerSequence;
                return {};
            }

            auto const& results = res.value();
            if (not results) {
                LOG(log_.error()) << "Could not fetch ledger diff - no rows; ledger = " << ledgerSequence;
                return {};
            }

            std::vector<ripple::uint256> resultKeys;
            for (auto [key] : extract<ripple::uint256>(results))
                resultKeys.push_back(key);

            return resultKeys;
        });

        // one of the above errors must have happened
        if (keys.empty())
            return {};

        LOG(log_.debug()) << "Fetched " << keys.size() << " diff hashes from Cassandra in " << timeDiff
                          << " milliseconds";

        auto const objs = fetchLedgerObjects(keys, ledgerSequence, yield);
        std::vector<LedgerObject> results;
        results.reserve(keys.size());

        std::transform(
            std::cbegin(keys),
            std::cend(keys),
            std::cbegin(objs),
            std::back_inserter(results),
            [](auto const& key, auto const& obj) {
                return LedgerObject{key, obj};
            }
        );

        return results;
    }

    void
    doWriteLedgerObject(std::string&& key, std::uint32_t const seq, std::string&& blob) override
    {
        LOG(log_.trace()) << "Writing ledger object " << key.size() << ":" << seq << " [" << blob.size() << " bytes]";

        if (range)
            executor_.write(schema_->insertDiff, seq, key);

        executor_.write(schema_->insertObject, std::move(key), seq, std::move(blob));
    }

    void
    writeSuccessor(std::string&& key, std::uint32_t const seq, std::string&& successor) override
    {
        LOG(log_.trace()) << "Writing successor. key = " << key.size() << " bytes. "
                          << " seq = " << std::to_string(seq) << " successor = " << successor.size() << " bytes.";
        ASSERT(!key.empty(), "Key must not be empty");
        ASSERT(!successor.empty(), "Successor must not be empty");

        executor_.write(schema_->insertSuccessor, std::move(key), seq, std::move(successor));
    }

    void
    writeAccountTransactions(std::vector<AccountTransactionsData> data) override
    {
        std::vector<Statement> statements;
        statements.reserve(data.size() * 10);  // assume 10 transactions on average

        for (auto& record : data) {
            std::transform(
                std::begin(record.accounts),
                std::end(record.accounts),
                std::back_inserter(statements),
                [this, &record](auto&& account) {
                    return schema_->insertAccountTx.bind(
                        std::forward<decltype(account)>(account),
                        std::make_tuple(record.ledgerSequence, record.transactionIndex),
                        record.txHash
                    );
                }
            );
        }

        executor_.write(std::move(statements));
    }

    void
    writeNFTTransactions(std::vector<NFTTransactionsData> const& data) override
    {
        std::vector<Statement> statements;
        statements.reserve(data.size());

        std::transform(std::cbegin(data), std::cend(data), std::back_inserter(statements), [this](auto const& record) {
            return schema_->insertNFTTx.bind(
                record.tokenID, std::make_tuple(record.ledgerSequence, record.transactionIndex), record.txHash
            );
        });

        executor_.write(std::move(statements));
    }

    void
    writeTransaction(
        std::string&& hash,
        std::uint32_t const seq,
        std::uint32_t const date,
        std::string&& transaction,
        std::string&& metadata
    ) override
    {
        LOG(log_.trace()) << "Writing txn to cassandra";

        executor_.write(schema_->insertLedgerTransaction, seq, hash);
        executor_.write(
            schema_->insertTransaction, std::move(hash), seq, date, std::move(transaction), std::move(metadata)
        );
    }

    void
    writeNFTs(std::vector<NFTsData> const& data) override
    {
        std::vector<Statement> statements;
        statements.reserve(data.size() * 3);

        for (NFTsData const& record : data) {
            statements.push_back(
                schema_->insertNFT.bind(record.tokenID, record.ledgerSequence, record.owner, record.isBurned)
            );

            // If `uri` is set (and it can be set to an empty uri), we know this
            // is a net-new NFT. That is, this NFT has not been seen before by
            // us _OR_ it is in the extreme edge case of a re-minted NFT ID with
            // the same NFT ID as an already-burned token. In this case, we need
            // to record the URI and link to the issuer_nf_tokens table.
            if (record.uri) {
                statements.push_back(schema_->insertIssuerNFT.bind(
                    ripple::nft::getIssuer(record.tokenID),
                    static_cast<uint32_t>(ripple::nft::getTaxon(record.tokenID)),
                    record.tokenID
                ));
                statements.push_back(
                    schema_->insertNFTURI.bind(record.tokenID, record.ledgerSequence, record.uri.value())
                );
            }
        }

        executor_.write(std::move(statements));
    }

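    // Design note on the write path above: the write*() methods only enqueue
    // statements through executor_.write() and return immediately; durability
    // is deferred to doFinishWrites(), which calls executor_.sync() to await
    // all pending writes before the ledger range is advanced.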
    void
    startWrites() const override
    {
        // Note: this is a no-op in the original implementation too.
        // It was probably used in PG to start a transaction or something similar.
    }

    bool
    isTooBusy() const override
    {
        return executor_.isTooBusy();
    }

    boost::json::object
    stats() const override
    {
        return executor_.stats();
    }

private:
    bool
    executeSyncUpdate(Statement statement)
    {
        auto const res = executor_.writeSync(statement);
        auto maybeSuccess = res->template get<bool>();
        if (not maybeSuccess) {
            LOG(log_.error()) << "executeSyncUpdate - error getting result - no row";
            return false;
        }

        if (not maybeSuccess.value()) {
            LOG(log_.warn()) << "Update failed. Checking if DB state is what we expect";

            // The error may indicate that another writer wrote something.
            // In this case let's just compare the current state of things
            // against what we were trying to write in the first place and
            // use that as the source of truth for the result.
            auto rng = hardFetchLedgerRangeNoThrow();
            return rng && rng->maxSequence == ledgerSequence_;
        }

        return true;
    }
};

using CassandraBackend = BasicCassandraBackend<SettingsProvider, detail::DefaultExecutionStrategy<>>;
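// Illustrative usage sketch (assumptions: a running boost::asio::io_context
// `ioc`, a constructed `backend`, and a hypothetical `handle` consumer; not
// part of this header). Every read API takes a boost::asio::yield_context, so
// calls are made from within a coroutine:
//
//     boost::asio::spawn(ioc, [&](boost::asio::yield_context yield) {
//         if (auto const header = backend->fetchLedgerBySequence(32570u, yield); header)
//             handle(*header);
//     });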

} // namespace data::cassandra