Mirror of https://github.com/XRPLF/rippled.git
Synced 2026-02-18 21:02:29 +00:00

Compare commits: 641 commits, 2.2.0-rc2...pratik/Ope
[Commit table omitted: the mirror captured only abbreviated SHAs (bff83a2b92 through f2d37da4ca); the Author, Date, and message columns were empty.]
.clang-format

@@ -1,5 +1,21 @@
---
Language: Cpp
BreakBeforeBraces: Custom
BraceWrapping:
  AfterClass: true
  AfterControlStatement: true
  AfterEnum: false
  AfterFunction: true
  AfterNamespace: false
  AfterObjCDeclaration: true
  AfterStruct: true
  AfterUnion: true
  BeforeCatch: true
  BeforeElse: true
  IndentBraces: false
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
---
Language: Cpp
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
AlignConsecutiveAssignments: false
@@ -18,48 +34,41 @@ AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
  AfterClass: true
  AfterControlStatement: true
  AfterEnum: false
  AfterFunction: true
  AfterNamespace: false
  AfterObjCDeclaration: true
  AfterStruct: true
  AfterUnion: true
  BeforeCatch: true
  BeforeElse: true
  IndentBraces: false
BreakBeforeBinaryOperators: false
BreakBeforeBraces: Custom
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ColumnLimit: 120
CommentPragmas: "^ IWYU pragma:"
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ]
ForEachMacros: [Q_FOREACH, BOOST_FOREACH]
IncludeBlocks: Regroup
IncludeCategories:
  - Regex: '^<(BeastConfig)'
    Priority: 0
  - Regex: '^<(ripple)/'
    Priority: 2
  - Regex: '^<(boost)/'
    Priority: 3
  - Regex: '.*'
    Priority: 4
IncludeIsMainRegex: '$'
  - Regex: "^<(test)/"
    Priority: 0
  - Regex: "^<(xrpld)/"
    Priority: 1
  - Regex: "^<(xrpl)/"
    Priority: 2
  - Regex: "^<(boost)/"
    Priority: 3
  - Regex: "^.*/"
    Priority: 4
  - Regex: '^.*\.h'
    Priority: 5
  - Regex: ".*"
    Priority: 6
IncludeIsMainRegex: "$"
IndentCaseLabels: true
IndentFunctionDeclarationAfterType: false
IndentWidth: 4
IndentRequiresClause: true
IndentWidth: 4
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: false
@@ -70,18 +79,25 @@ PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
ReflowComments: true
RequiresClausePosition: OwnLine
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
Standard: Cpp11
TabWidth: 8
UseTab: Never
QualifierAlignment: Right
---
Language: Proto
BasedOnStyle: Google
ColumnLimit: 0
IndentWidth: 2
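For orientation, here is an illustrative sketch (not code from the repo) of how the brace-wrapping options above lay out C++: braces break onto their own line after classes, functions, and control statements, `else` starts on a fresh line, and namespace braces stay inline.

```cpp
namespace example {  // AfterNamespace: false keeps this brace inline

class Widget
{  // AfterClass: true
public:  // AccessModifierOffset: -4 puts access specifiers at the margin
    void resize(int width)
    {  // AfterFunction: true
        if (width > 0)
        {  // AfterControlStatement: true
            width_ = width;
        }
        else  // BeforeElse: true
        {
            width_ = 0;
        }
    }

private:
    int width_ = 0;
};

}  // namespace example
```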
.cmake-format.yaml (new file, 247 lines)

@@ -0,0 +1,247 @@
_help_parse: Options affecting listfile parsing
parse:
  _help_additional_commands:
    - Specify structure for custom cmake functions
  additional_commands:
    target_protobuf_sources:
      pargs:
        - target
        - prefix
      kwargs:
        PROTOS: "*"
        LANGUAGE: cpp
        IMPORT_DIRS: "*"
        GENERATE_EXTENSIONS: "*"
        PLUGIN: "*"
  _help_override_spec:
    - Override configurations per-command where available
  override_spec: {}
  _help_vartags:
    - Specify variable tags.
  vartags: []
  _help_proptags:
    - Specify property tags.
  proptags: []
_help_format: Options affecting formatting.
format:
  _help_disable:
    - Disable formatting entirely, making cmake-format a no-op
  disable: false
  _help_line_width:
    - How wide to allow formatted cmake files
  line_width: 120
  _help_tab_size:
    - How many spaces to tab for indent
  tab_size: 4
  _help_use_tabchars:
    - If true, lines are indented using tab characters (utf-8
    - 0x09) instead of <tab_size> space characters (utf-8 0x20).
    - In cases where the layout would require a fractional tab
    - character, the behavior of the fractional indentation is
    - governed by <fractional_tab_policy>
  use_tabchars: false
  _help_fractional_tab_policy:
    - If <use_tabchars> is True, then the value of this variable
    - indicates how fractional indentions are handled during
    - whitespace replacement. If set to 'use-space', fractional
    - indentation is left as spaces (utf-8 0x20). If set to
    - "`round-up` fractional indentation is replaced with a single"
    - tab character (utf-8 0x09) effectively shifting the column
    - to the next tabstop
  fractional_tab_policy: use-space
  _help_max_subgroups_hwrap:
    - If an argument group contains more than this many sub-groups
    - (parg or kwarg groups) then force it to a vertical layout.
  max_subgroups_hwrap: 4
  _help_max_pargs_hwrap:
    - If a positional argument group contains more than this many
    - arguments, then force it to a vertical layout.
  max_pargs_hwrap: 5
  _help_max_rows_cmdline:
    - If a cmdline positional group consumes more than this many
    - lines without nesting, then invalidate the layout (and nest)
  max_rows_cmdline: 2
  _help_separate_ctrl_name_with_space:
    - If true, separate flow control names from their parentheses
    - with a space
  separate_ctrl_name_with_space: true
  _help_separate_fn_name_with_space:
    - If true, separate function names from parentheses with a
    - space
  separate_fn_name_with_space: false
  _help_dangle_parens:
    - If a statement is wrapped to more than one line, than dangle
    - the closing parenthesis on its own line.
  dangle_parens: false
  _help_dangle_align:
    - If the trailing parenthesis must be 'dangled' on its on
    - "line, then align it to this reference: `prefix`: the start"
    - "of the statement, `prefix-indent`: the start of the"
    - "statement, plus one indentation level, `child`: align to"
    - the column of the arguments
  dangle_align: prefix
  _help_min_prefix_chars:
    - If the statement spelling length (including space and
    - parenthesis) is smaller than this amount, then force reject
    - nested layouts.
  min_prefix_chars: 18
  _help_max_prefix_chars:
    - If the statement spelling length (including space and
    - parenthesis) is larger than the tab width by more than this
    - amount, then force reject un-nested layouts.
  max_prefix_chars: 10
  _help_max_lines_hwrap:
    - If a candidate layout is wrapped horizontally but it exceeds
    - this many lines, then reject the layout.
  max_lines_hwrap: 2
  _help_line_ending:
    - What style line endings to use in the output.
  line_ending: unix
  _help_command_case:
    - Format command names consistently as 'lower' or 'upper' case
  command_case: canonical
  _help_keyword_case:
    - Format keywords consistently as 'lower' or 'upper' case
  keyword_case: unchanged
  _help_always_wrap:
    - A list of command names which should always be wrapped
  always_wrap: []
  _help_enable_sort:
    - If true, the argument lists which are known to be sortable
    - will be sorted lexicographicall
  enable_sort: true
  _help_autosort:
    - If true, the parsers may infer whether or not an argument
    - list is sortable (without annotation).
  autosort: true
  _help_require_valid_layout:
    - By default, if cmake-format cannot successfully fit
    - everything into the desired linewidth it will apply the
    - last, most aggressive attempt that it made. If this flag is
    - True, however, cmake-format will print error, exit with non-
    - zero status code, and write-out nothing
  require_valid_layout: false
  _help_layout_passes:
    - A dictionary mapping layout nodes to a list of wrap
    - decisions. See the documentation for more information.
  layout_passes: {}
_help_markup: Options affecting comment reflow and formatting.
markup:
  _help_bullet_char:
    - What character to use for bulleted lists
  bullet_char: "-"
  _help_enum_char:
    - What character to use as punctuation after numerals in an
    - enumerated list
  enum_char: .
  _help_first_comment_is_literal:
    - If comment markup is enabled, don't reflow the first comment
    - block in each listfile. Use this to preserve formatting of
    - your copyright/license statements.
  first_comment_is_literal: false
  _help_literal_comment_pattern:
    - If comment markup is enabled, don't reflow any comment block
    - which matches this (regex) pattern. Default is `None`
    - (disabled).
  literal_comment_pattern: null
  _help_fence_pattern:
    - Regular expression to match preformat fences in comments
    - default= ``r'^\s*([`~]{3}[`~]*)(.*)$'``
  fence_pattern: ^\s*([`~]{3}[`~]*)(.*)$
  _help_ruler_pattern:
    - Regular expression to match rulers in comments default=
    - '``r''^\s*[^\w\s]{3}.*[^\w\s]{3}$''``'
  ruler_pattern: ^\s*[^\w\s]{3}.*[^\w\s]{3}$
  _help_explicit_trailing_pattern:
    - If a comment line matches starts with this pattern then it
    - is explicitly a trailing comment for the preceding
    - argument. Default is '#<'
  explicit_trailing_pattern: "#<"
  _help_hashruler_min_length:
    - If a comment line starts with at least this many consecutive
    - hash characters, then don't lstrip() them off. This allows
    - for lazy hash rulers where the first hash char is not
    - separated by space
  hashruler_min_length: 10
  _help_canonicalize_hashrulers:
    - If true, then insert a space between the first hash char and
    - remaining hash chars in a hash ruler, and normalize its
    - length to fill the column
  canonicalize_hashrulers: true
  _help_enable_markup:
    - enable comment markup parsing and reflow
  enable_markup: false
_help_lint: Options affecting the linter
lint:
  _help_disabled_codes:
    - a list of lint codes to disable
  disabled_codes: []
  _help_function_pattern:
    - regular expression pattern describing valid function names
  function_pattern: "[0-9a-z_]+"
  _help_macro_pattern:
    - regular expression pattern describing valid macro names
  macro_pattern: "[0-9A-Z_]+"
  _help_global_var_pattern:
    - regular expression pattern describing valid names for
    - variables with global (cache) scope
  global_var_pattern: "[A-Z][0-9A-Z_]+"
  _help_internal_var_pattern:
    - regular expression pattern describing valid names for
    - variables with global scope (but internal semantic)
  internal_var_pattern: _[A-Z][0-9A-Z_]+
  _help_local_var_pattern:
    - regular expression pattern describing valid names for
    - variables with local scope
  local_var_pattern: "[a-z][a-z0-9_]+"
  _help_private_var_pattern:
    - regular expression pattern describing valid names for
    - privatedirectory variables
  private_var_pattern: _[0-9a-z_]+
  _help_public_var_pattern:
    - regular expression pattern describing valid names for public
    - directory variables
  public_var_pattern: "[A-Z][0-9A-Z_]+"
  _help_argument_var_pattern:
    - regular expression pattern describing valid names for
    - function/macro arguments and loop variables.
  argument_var_pattern: "[a-z][a-z0-9_]+"
  _help_keyword_pattern:
    - regular expression pattern describing valid names for
    - keywords used in functions or macros
  keyword_pattern: "[A-Z][0-9A-Z_]+"
  _help_max_conditionals_custom_parser:
    - In the heuristic for C0201, how many conditionals to match
    - within a loop in before considering the loop a parser.
  max_conditionals_custom_parser: 2
  _help_min_statement_spacing:
    - Require at least this many newlines between statements
  min_statement_spacing: 1
  _help_max_statement_spacing:
    - Require no more than this many newlines between statements
  max_statement_spacing: 2
  max_returns: 6
  max_branches: 12
  max_arguments: 5
  max_localvars: 15
  max_statements: 50
_help_encode: Options affecting file encoding
encode:
  _help_emit_byteorder_mark:
    - If true, emit the unicode byte-order mark (BOM) at the start
    - of the file
  emit_byteorder_mark: false
  _help_input_encoding:
    - Specify the encoding of the input file. Defaults to utf-8
  input_encoding: utf-8
  _help_output_encoding:
    - Specify the encoding of the output file. Defaults to utf-8.
    - Note that cmake only claims to support utf-8 so be careful
    - when using anything else
  output_encoding: utf-8
_help_misc: Miscellaneous configurations options.
misc:
  _help_per_command:
    - A dictionary containing any per-command configuration
    - overrides. Currently only `command_case` is supported.
  per_command: {}
.codecov.yml (11 lines changed)
@@ -7,13 +7,13 @@ comment:
  show_carryforward_flags: false

coverage:
  range: "60..80"
  range: "70..85"
  precision: 1
  round: nearest
  status:
    project:
      default:
        target: 60%
        target: 75%
        threshold: 2%
    patch:
      default:
@@ -27,11 +27,12 @@ github_checks:
parsers:
  cobertura:
    partials_as_hits: true
    handle_missing_conditions : true
    handle_missing_conditions: true

slack_app: false

ignore:
  - "src/test/"
  - "src/ripple/beast/test/"
  - "src/ripple/beast/unit_test/"
  - "src/tests/"
  - "include/xrpl/beast/test/"
  - "include/xrpl/beast/unit_test/"
.git-blame-ignore-revs

@@ -6,3 +6,11 @@ e2384885f5f630c8f0ffe4bf21a169b433a16858
241b9ddde9e11beb7480600fd5ed90e1ef109b21
760f16f56835663d9286bd29294d074de26a7ba6
0eebe6a5f4246fced516d52b83ec4e7f47373edd
2189cc950c0cebb89e4e2fa3b2d8817205bf7cef
b9d007813378ad0ff45660dc07285b823c7e9855
fe9a5365b8a52d4acc42eb27369247e6f238a4f9
9a93577314e6a8d4b4a8368cc9d2b15a5d8303e8
552377c76f55b403a1c876df873a23d780fcc81c
97f0747e103f13e26e45b731731059b32f7679ac
b13370ac0d207217354f1fc1c29aef87769fb8a1
896b8c3b54a22b0497cb0d1ce95e1095f9a227ce
.gitattributes (vendored, 5 lines changed)
@@ -1,9 +1,6 @@
# Set default behaviour, in case users don't have core.autocrlf set.
#* text=auto

# These annoying files
rippled.1 binary
LICENSE binary
# cspell: disable

# Visual Studio
*.sln text eol=crlf
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 19 lines changed)
@@ -1,31 +1,36 @@
---
name: Bug Report
about: Create a report to help us improve rippled
title: "[Title with short description] (Version: [rippled version])"
labels: ''
assignees: ''

about: Create a report to help us improve xrpld
title: "[Title with short description] (Version: [xrpld version])"
labels: ""
assignees: ""
---

<!-- Please search existing issues to avoid creating duplicates.-->

## Issue Description

<!--Provide a summary for your issue/bug.-->

## Steps to Reproduce

<!--List in detail the exact steps to reproduce the unexpected behavior of the software.-->

## Expected Result

<!--Explain in detail what behavior you expected to happen.-->

## Actual Result

<!--Explain in detail what behavior actually happened.-->

## Environment

<!--Please describe your environment setup (such as Ubuntu 18.04 with Boost 1.70).-->
<!-- If you are using a formal release, please use the version returned by './rippled --version' as the version number-->
<!-- If you are using a formal release, please use the version returned by './xrpld --version' as the version number-->
<!-- If you are working off of develop, please add the git hash via 'git rev-parse HEAD'-->

## Supporting Files

<!--If you have supporting files such as a log, feel free to post a link here using Github Gist.-->
<!--Consider adding configuration files with private information removed via Github Gist. -->
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 8 lines changed)
@@ -3,19 +3,23 @@ name: Feature Request
about: Suggest a new feature for the rippled project
title: "[Title with short description] (Version: [rippled version])"
labels: Feature Request
assignees: ''

assignees: ""
---

<!-- Please search existing issues to avoid creating duplicates.-->

## Summary

<!-- Provide a summary to the feature request-->

## Motivation

<!-- Why do we need this feature?-->

## Solution

<!-- What is the solution?-->

## Paths Not Taken

<!-- What other alternatives have been considered?-->
.github/actions/build-deps/action.yml (vendored, new file, 48 lines)
@@ -0,0 +1,48 @@
name: Build Conan dependencies
description: "Install Conan dependencies, optionally forcing a rebuild of all dependencies."

# Note that actions do not support 'type' and all inputs are strings, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/metadata-syntax#inputs.
inputs:
  build_type:
    description: 'The build type to use ("Debug", "Release").'
    required: true
  build_nproc:
    description: "The number of processors to use for building."
    required: true
  force_build:
    description: 'Force building of all dependencies ("true", "false").'
    required: false
    default: "false"
  log_verbosity:
    description: "The logging verbosity."
    required: false
    default: "verbose"
  sanitizers:
    description: "The sanitizers to enable."
    required: false
    default: ""

runs:
  using: composite
  steps:
    - name: Install Conan dependencies
      shell: bash
      env:
        BUILD_NPROC: ${{ inputs.build_nproc }}
        BUILD_OPTION: ${{ inputs.force_build == 'true' && '*' || 'missing' }}
        BUILD_TYPE: ${{ inputs.build_type }}
        LOG_VERBOSITY: ${{ inputs.log_verbosity }}
        SANITIZERS: ${{ inputs.sanitizers }}
      run: |
        echo 'Installing dependencies.'
        conan install \
          --profile ci \
          --build="${BUILD_OPTION}" \
          --options:host='&:tests=True' \
          --options:host='&:xrpld=True' \
          --settings:all build_type="${BUILD_TYPE}" \
          --conf:all tools.build:jobs=${BUILD_NPROC} \
          --conf:all tools.build:verbosity="${LOG_VERBOSITY}" \
          --conf:all tools.compilation:verbosity="${LOG_VERBOSITY}" \
          .
.github/actions/build/action.yml (vendored, 32 lines; deleted)
@@ -1,32 +0,0 @@
name: build
inputs:
  generator:
    default: null
  configuration:
    required: true
  cmake-args:
    default: null
  cmake-target:
    default: all
# An implicit input is the environment variable `build_dir`.
runs:
  using: composite
  steps:
    - name: configure
      shell: bash
      run: |
        cd ${build_dir}
        cmake \
          ${{ inputs.generator && format('-G "{0}"', inputs.generator) || '' }} \
          -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
          -DCMAKE_BUILD_TYPE=${{ inputs.configuration }} \
          ${{ inputs.cmake-args }} \
          ..
    - name: build
      shell: bash
      run: |
        cmake \
          --build ${build_dir} \
          --config ${{ inputs.configuration }} \
          --parallel ${NUM_PROCESSORS:-$(nproc)} \
          --target ${{ inputs.cmake-target }}
.github/actions/dependencies/action.yml (vendored, 58 lines; deleted)
@@ -1,58 +0,0 @@
name: dependencies
inputs:
  configuration:
    required: true
# An implicit input is the environment variable `build_dir`.
runs:
  using: composite
  steps:
    - name: unlock Conan
      shell: bash
      run: conan remove --locks
    - name: export custom recipes
      shell: bash
      run: |
        conan config set general.revisions_enabled=1
        conan export external/snappy snappy/1.1.10@
        conan export external/rocksdb rocksdb/6.29.5@
        conan export external/soci soci/4.0.3@
    - name: add Ripple Conan remote
      shell: bash
      run: |
        conan remote list
        conan remote remove ripple || true
        # Do not quote the URL. An empty string will be accepted (with
        # a non-fatal warning), but a missing argument will not.
        conan remote add ripple ${{ env.CONAN_URL }} --insert 0
    - name: try to authenticate to Ripple Conan remote
      id: remote
      shell: bash
      run: |
        # `conan user` implicitly uses the environment variables
        # CONAN_LOGIN_USERNAME_<REMOTE> and CONAN_PASSWORD_<REMOTE>.
        # https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
        # https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
        # https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
        echo outcome=$(conan user --remote ripple --password >&2 \
          && echo success || echo failure) | tee ${GITHUB_OUTPUT}
    - name: list missing binaries
      id: binaries
      shell: bash
      # Print the list of dependencies that would need to be built locally.
      # A non-empty list means we have "failed" to cache binaries remotely.
      run: |
        echo missing=$(conan info . --build missing --settings build_type=${{ inputs.configuration }} --json 2>/dev/null | grep '^\[') | tee ${GITHUB_OUTPUT}
    - name: install dependencies
      shell: bash
      run: |
        mkdir ${build_dir}
        cd ${build_dir}
        conan install \
          --output-folder . \
          --build missing \
          --settings build_type=${{ inputs.configuration }} \
          ..
    - name: upload dependencies to remote
      if: (steps.binaries.outputs.missing != '[]') && (steps.remote.outputs.outcome == 'success')
      shell: bash
      run: conan upload --remote ripple '*' --all --parallel --confirm
.github/actions/generate-version/action.yml (vendored, new file, 44 lines)
@@ -0,0 +1,44 @@
name: Generate build version number
description: "Generate build version number."

outputs:
  version:
    description: "The generated build version number."
    value: ${{ steps.version.outputs.version }}

runs:
  using: composite
  steps:
    # When a tag is pushed, the version is used as-is.
    - name: Generate version for tag event
      if: ${{ github.event_name == 'tag' }}
      shell: bash
      env:
        VERSION: ${{ github.ref_name }}
      run: echo "VERSION=${VERSION}" >> "${GITHUB_ENV}"

    # When a tag is not pushed, then the version (e.g. 1.2.3-b0) is extracted
    # from the BuildInfo.cpp file and the shortened commit hash appended to it.
    # We use a plus sign instead of a hyphen because Conan recipe versions do
    # not support two hyphens.
    - name: Generate version for non-tag event
      if: ${{ github.event_name != 'tag' }}
      shell: bash
      run: |
        echo 'Extracting version from BuildInfo.cpp.'
        VERSION="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" | awk -F '"' '{print $2}')"
        if [[ -z "${VERSION}" ]]; then
          echo 'Unable to extract version from BuildInfo.cpp.'
          exit 1
        fi

        echo 'Appending shortened commit hash to version.'
        SHA='${{ github.sha }}'
        VERSION="${VERSION}+${SHA:0:7}"

        echo "VERSION=${VERSION}" >> "${GITHUB_ENV}"

    - name: Output version
      id: version
      shell: bash
      run: echo "version=${VERSION}" >> "${GITHUB_OUTPUT}"
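The grep/awk pipeline in the action above pulls the second double-quote-delimited field from the line defining `versionString`. As a hypothetical sketch of the line it targets in `src/libxrpl/protocol/BuildInfo.cpp` (the exact declaration in the repo may differ):

```cpp
// src/libxrpl/protocol/BuildInfo.cpp (sketch; the real file has more context)
namespace ripple {
namespace BuildInfo {

// The CI action greps for `versionString =` and takes the second
// double-quote-delimited awk field, yielding "2.2.0-rc2" here.
char const* const versionString = "2.2.0-rc2";

}  // namespace BuildInfo
}  // namespace ripple
```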
.github/actions/print-env/action.yml (vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
name: Print build environment
description: "Print environment and some tooling versions"

runs:
  using: composite
  steps:
    - name: Check configuration (Windows)
      if: ${{ runner.os == 'Windows' }}
      shell: bash
      run: |
        echo 'Checking environment variables.'
        set

    - name: Check configuration (Linux and macOS)
      if: ${{ runner.os == 'Linux' || runner.os == 'macOS' }}
      shell: bash
      run: |
        echo 'Checking path.'
        echo ${PATH} | tr ':' '\n'

        echo 'Checking environment variables.'
        env | sort

        echo 'Checking compiler version.'
        ${{ runner.os == 'Linux' && '${CC}' || 'clang' }} --version

        echo 'Checking Ninja version.'
        ninja --version

        echo 'Checking nproc version.'
        nproc --version

    - name: Check configuration (all)
      shell: bash
      run: |
        echo 'Checking Ccache version.'
        ccache --version

        echo 'Checking CMake version.'
        cmake --version

        echo 'Checking Conan version.'
        conan --version
.github/actions/setup-conan/action.yml (vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
name: Setup Conan
description: "Set up Conan configuration, profile, and remote."

inputs:
  remote_name:
    description: "The name of the Conan remote to use."
    required: false
    default: xrplf
  remote_url:
    description: "The URL of the Conan endpoint to use."
    required: false
    default: https://conan.ripplex.io

runs:
  using: composite

  steps:
    - name: Set up Conan configuration
      shell: bash
      run: |
        echo 'Installing configuration.'
        cat conan/global.conf ${{ runner.os == 'Linux' && '>>' || '>' }} $(conan config home)/global.conf

        echo 'Conan configuration:'
        conan config show '*'

    - name: Set up Conan profile
      shell: bash
      run: |
        echo 'Installing profile.'
        conan config install conan/profiles/ -tf $(conan config home)/profiles/

        echo 'Conan profile:'
        conan profile show --profile ci

    - name: Set up Conan remote
      shell: bash
      env:
        REMOTE_NAME: ${{ inputs.remote_name }}
        REMOTE_URL: ${{ inputs.remote_url }}
      run: |
        echo "Adding Conan remote '${REMOTE_NAME}' at '${REMOTE_URL}'."
        conan remote add --index 0 --force "${REMOTE_NAME}" "${REMOTE_URL}"

        echo 'Listing Conan remotes.'
        conan remote list
.github/scripts/levelization/README.md (vendored, new file, 134 lines)
@@ -0,0 +1,134 @@
# Levelization

Levelization is the term used to describe efforts to prevent rippled from
having or creating cyclic dependencies.

rippled code is organized into directories under `src/xrpld`, `src/libxrpl` (and
`src/test`) representing modules. The modules are intended to be
organized into "tiers" or "levels" such that a module from one level can
only include code from lower levels. Additionally, a module
in one level should never include code in an `impl` or `detail` folder of any level
other than its own.
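As an illustrative sketch of these rules (the file and the exact headers are hypothetical, though the module names match the tiers tabulated below), a source file in `xrpl/protocol` may include lower-tier modules but not higher tiers or another module's private folders:

```cpp
// Hypothetical file in the xrpl/protocol module (tier 04 in the tables below).

#include <xrpl/basics/Number.h>   // OK: xrpl/basics is tier 02, below protocol.
#include <xrpl/json/json_value.h> // OK: xrpl/json is tier 03, below protocol.

// Violations the levelization check is meant to flag:
// #include <xrpld/app/main/Application.h> // NOT OK: xrpld/app is a higher tier.
// #include <xrpl/json/detail/Helper.h>    // NOT OK: another module's detail folder.
```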
The codebase is split into two main areas:

- **libxrpl** (`src/libxrpl`, `include/xrpl`): Reusable library modules with public interfaces
- **xrpld** (`src/xrpld`): Application-specific implementation code

Unfortunately, over time, enforcement of levelization has been
inconsistent, so the current state of the code doesn't necessarily
reflect these rules. Whenever possible, developers should refactor any
levelization violations they find (by moving files or individual
classes). At the very least, don't make things worse.

The tables below summarize the _desired_ division of modules, based on the current
state of the rippled code. The levels are numbered from
the bottom up, with the lower-level, lower-numbered, more independent
modules listed first, and the higher-level, higher-numbered modules with
more dependencies listed later.

**tl;dr:** The modules listed first are more independent than the modules
listed later.

## libxrpl Modules (Reusable Libraries)

| Level / Tier | Module(s)                           |
| ------------ | ----------------------------------- |
| 01           | xrpl/beast                          |
| 02           | xrpl/basics                         |
| 03           | xrpl/json xrpl/crypto               |
| 04           | xrpl/protocol                       |
| 05           | xrpl/core xrpl/resource xrpl/server |
| 06           | xrpl/ledger xrpl/nodestore xrpl/net |
| 07           | xrpl/shamap                         |

## xrpld Modules (Application Implementation)

| Level / Tier | Module(s)                        |
| ------------ | -------------------------------- |
| 05           | xrpld/conditions xrpld/consensus |
| 06           | xrpld/core xrpld/peerfinder      |
| 07           | xrpld/shamap xrpld/overlay       |
| 08           | xrpld/app                        |
| 09           | xrpld/rpc                        |
| 10           | xrpld/perflog                    |

## Test Modules

| Level / Tier | Module(s) |
| ------------ | --------- |
| 11           | test/jtx test/beast test/csf |
| 12           | test/unit_test |
| 13           | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay |
| 14           | test |
| 15           | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore |
| 16           | test/rpc test/app |

(Note that `test` levelization is _much_ less important and _much_ less
strictly enforced than `xrpl`/`xrpld` levelization, other than the requirement
that `test` code should _never_ be included in `xrpl` or `xrpld` code.)

## Validation

The [levelization](generate.sh) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked-out repo, and will
do an analysis of all the `#include`s in
the rippled source. The only caveat is that it runs much slower
under Windows than in Linux. It hasn't yet been tested under macOS.
It generates many files of [results](results):

- `rawincludes.txt`: The raw dump of the `#include`s
- `paths.txt`: A second dump grouping the source module
  to the destination module, de-duped, and with frequency counts.
- `includes/`: A directory where each file represents a module and
  contains a list of modules and counts that the module _includes_.
- `included_by/`: Similar to `includes/`, but the other way around. Each
  file represents a module and contains a list of modules and counts
  that _include_ the module.
- [`loops.txt`](results/loops.txt): A list of direct loops detected
  between modules as they actually exist, as opposed to how they are
  desired as described above. In a perfect repo, this file will be
  empty.
  This file is committed to the repo, and is used by the [levelization
  Github workflow](../../workflows/reusable-check-levelization.yml) to validate
  that nothing changed.
- [`ordering.txt`](results/ordering.txt): A list showing relationships
  between modules where there are no loops as they actually exist, as
  opposed to how they are desired as described above.
  This file is committed to the repo, and is used by the [levelization
  Github workflow](../../workflows/reusable-check-levelization.yml) to validate
  that nothing changed.
- [`levelization.yml`](../../workflows/reusable-check-levelization.yml):
  A Github Actions workflow to test that levelization loops haven't
  changed. Unfortunately, if changes are detected, it can't tell if
  they are improvements or not, so if you have resolved any issues or
  done anything else to improve levelization, run `generate.sh`,
  and commit the updated results.

The `loops.txt` and `ordering.txt` files relate the modules
using comparison signs, which indicate the number of times each
module is included in the other.

- `A > B` means that A should probably be at a higher level than B,
  because B is included in A significantly more than A is included in B.
  These results can be included in both `loops.txt` and `ordering.txt`.
  Because `ordering.txt` only includes relationships where B is not
  included in A at all, it will only include these types of results.
- `A ~= B` means that A and B are included in each other a different
  number of times, but the values are so close that the script can't
  definitively say that one should be above the other. These results
  will only be included in `loops.txt`.
- `A == B` means that A and B include each other the same number of
  times, so the script has no clue which should be higher. These results
  will only be included in `loops.txt`.

The committed files hide the detailed values intentionally, to
prevent false alarms and merging issues, and because it's easy to
get those details locally:

1. Run `generate.sh`.
2. Grep the modules in `paths.txt`.
   - For example, if a cycle is found `A ~= B`, simply `grep -w
     A .github/scripts/levelization/results/paths.txt | grep -w B`
.github/scripts/levelization/generate.sh (vendored, new executable file, 130 lines)
@@ -0,0 +1,130 @@
#!/bin/bash

# Usage: generate.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.

pushd $( dirname $0 )

if [ -v PS1 ]
then
    # if the shell is interactive, clean up any flotsam before analyzing
    git clean -ix
fi

# Ensure all sorting is ASCII-order consistently across platforms.
export LANG=C

rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
    grep -v boost | tee ${includes}
popd
pushd results

oldifs=${IFS}
IFS=:
mkdir includes
mkdir included_by
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
    level=$( echo ${file} | cut -d/ -f 2,3 )
    # If the "level" indicates a file, cut off the filename
    if [[ "${level##*.}" != "${level}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        level="$( dirname ${level} )/toplevel"
    fi
    level=$( echo ${level} | tr '/' '.' )

    includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
        cut -d/ -f 1,2 )
    if [[ "${includelevel##*.}" != "${includelevel}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        includelevel="$( dirname ${includelevel} )/toplevel"
    fi
    includelevel=$( echo ${includelevel} | tr '/' '.' )

    if [[ "$level" != "$includelevel" ]]
    then
        echo $level $includelevel | tee -a paths.txt
    fi
done
echo Sort and deduplicate paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- #close fd 3
IFS=${oldifs}
unset oldifs

echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
    echo ${include} ${count} | tee -a includes/${level}
    echo ${level} ${count} | tee -a included_by/${include}
done
exec 4>&- #close fd 4

loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
    if [[ -f "$source" ]]
    then
        exec 5<"${source}" # open for input
        while read -r -u 5 include includefreq
        do
            if [[ -f $include ]]
            then
                if grep -q -w $source $include
                then
                    if grep -q -w "Loop: $include $source" "${loops}"
                    then
                        continue
                    fi
                    sourcefreq=$( grep -w $source $include | cut -d\  -f2 )
                    echo "Loop: $source $include"
                    # If the counts are close, indicate that the two modules are
                    # on the same level, though they shouldn't be
                    if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
                    then
                        echo -e "  $source > $include\n"
                    elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
                    then
                        echo -e "  $include > $source\n"
                    elif [[ $sourcefreq -eq $includefreq ]]
                    then
                        echo -e "  $include == $source\n"
                    else
                        echo -e "  $include ~= $source\n"
                    fi
                else
                    echo "$source > $include" >> "${ordering}"
                fi
            fi
        done
        exec 5>&- #close fd 5
    fi
done
exec 1>&4 #close fd 1
exec 4>&- #close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd
.github/scripts/levelization/results/loops.txt (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
Loop: test.jtx test.toplevel
  test.toplevel > test.jtx

Loop: test.jtx test.unit_test
  test.unit_test == test.jtx

Loop: xrpld.app xrpld.overlay
  xrpld.overlay > xrpld.app

Loop: xrpld.app xrpld.peerfinder
  xrpld.peerfinder == xrpld.app

Loop: xrpld.app xrpld.rpc
  xrpld.rpc > xrpld.app

Loop: xrpld.app xrpld.shamap
  xrpld.shamap ~= xrpld.app

Loop: xrpld.overlay xrpld.rpc
  xrpld.rpc ~= xrpld.overlay
.github/scripts/levelization/results/ordering.txt (vendored, new file, 240 lines)
@@ -0,0 +1,240 @@
libxrpl.basics > xrpl.basics
libxrpl.core > xrpl.basics
libxrpl.core > xrpl.core
libxrpl.crypto > xrpl.basics
libxrpl.json > xrpl.basics
libxrpl.json > xrpl.json
libxrpl.ledger > xrpl.basics
libxrpl.ledger > xrpl.json
libxrpl.ledger > xrpl.ledger
libxrpl.ledger > xrpl.protocol
libxrpl.net > xrpl.basics
libxrpl.net > xrpl.net
libxrpl.nodestore > xrpl.basics
libxrpl.nodestore > xrpl.json
libxrpl.nodestore > xrpl.nodestore
libxrpl.nodestore > xrpl.protocol
libxrpl.protocol > xrpl.basics
libxrpl.protocol > xrpl.json
libxrpl.protocol > xrpl.protocol
libxrpl.rdb > xrpl.basics
libxrpl.rdb > xrpl.rdb
libxrpl.resource > xrpl.basics
libxrpl.resource > xrpl.json
libxrpl.resource > xrpl.resource
libxrpl.server > xrpl.basics
libxrpl.server > xrpl.json
libxrpl.server > xrpl.protocol
libxrpl.server > xrpl.rdb
libxrpl.server > xrpl.server
libxrpl.shamap > xrpl.basics
libxrpl.shamap > xrpl.protocol
libxrpl.shamap > xrpl.shamap
test.app > test.jtx
test.app > test.rpc
test.app > test.toplevel
test.app > test.unit_test
test.app > xrpl.basics
test.app > xrpl.core
test.app > xrpld.app
test.app > xrpld.core
test.app > xrpld.overlay
test.app > xrpld.rpc
test.app > xrpl.json
test.app > xrpl.ledger
test.app > xrpl.nodestore
test.app > xrpl.protocol
test.app > xrpl.rdb
test.app > xrpl.resource
test.app > xrpl.server
test.basics > test.jtx
test.basics > test.unit_test
test.basics > xrpl.basics
test.basics > xrpl.core
test.basics > xrpld.rpc
test.basics > xrpl.json
test.basics > xrpl.protocol
test.beast > xrpl.basics
test.conditions > xrpl.basics
test.conditions > xrpld.conditions
test.consensus > test.csf
test.consensus > test.toplevel
test.consensus > test.unit_test
test.consensus > xrpl.basics
test.consensus > xrpld.app
test.consensus > xrpld.consensus
test.consensus > xrpl.json
test.consensus > xrpl.ledger
test.core > test.jtx
test.core > test.toplevel
test.core > test.unit_test
test.core > xrpl.basics
test.core > xrpl.core
test.core > xrpld.core
test.core > xrpl.json
test.core > xrpl.rdb
test.core > xrpl.server
test.csf > xrpl.basics
test.csf > xrpld.consensus
test.csf > xrpl.json
test.csf > xrpl.protocol
test.json > test.jtx
test.json > xrpl.json
test.jtx > xrpl.basics
test.jtx > xrpld.app
test.jtx > xrpld.core
test.jtx > xrpld.rpc
test.jtx > xrpl.json
test.jtx > xrpl.ledger
test.jtx > xrpl.net
test.jtx > xrpl.protocol
test.jtx > xrpl.resource
test.jtx > xrpl.server
test.ledger > test.jtx
test.ledger > test.toplevel
test.ledger > xrpl.basics
test.ledger > xrpld.app
test.ledger > xrpld.core
test.ledger > xrpl.ledger
test.ledger > xrpl.protocol
test.nodestore > test.jtx
test.nodestore > test.toplevel
test.nodestore > test.unit_test
test.nodestore > xrpl.basics
test.nodestore > xrpl.nodestore
test.nodestore > xrpl.rdb
test.overlay > test.jtx
test.overlay > test.toplevel
test.overlay > test.unit_test
test.overlay > xrpl.basics
test.overlay > xrpld.app
test.overlay > xrpld.overlay
test.overlay > xrpld.peerfinder
test.overlay > xrpl.nodestore
test.overlay > xrpl.protocol
test.overlay > xrpl.shamap
test.peerfinder > test.beast
test.peerfinder > test.unit_test
test.peerfinder > xrpl.basics
test.peerfinder > xrpld.core
test.peerfinder > xrpld.peerfinder
test.peerfinder > xrpl.protocol
test.protocol > test.toplevel
test.protocol > xrpl.basics
test.protocol > xrpl.json
test.protocol > xrpl.protocol
test.resource > test.unit_test
test.resource > xrpl.basics
test.resource > xrpl.resource
test.rpc > test.jtx
test.rpc > test.toplevel
test.rpc > xrpl.basics
test.rpc > xrpl.core
test.rpc > xrpld.app
test.rpc > xrpld.core
test.rpc > xrpld.overlay
test.rpc > xrpld.rpc
test.rpc > xrpl.json
test.rpc > xrpl.protocol
test.rpc > xrpl.resource
test.server > test.jtx
test.server > test.toplevel
test.server > test.unit_test
test.server > xrpl.basics
test.server > xrpld.app
test.server > xrpld.core
test.server > xrpld.rpc
test.server > xrpl.json
test.server > xrpl.server
test.shamap > test.unit_test
test.shamap > xrpl.basics
test.shamap > xrpl.nodestore
test.shamap > xrpl.protocol
test.shamap > xrpl.shamap
test.toplevel > test.csf
test.toplevel > xrpl.json
test.unit_test > xrpl.basics
tests.libxrpl > xrpl.basics
tests.libxrpl > xrpl.json
tests.libxrpl > xrpl.net
xrpl.core > xrpl.basics
xrpl.core > xrpl.json
xrpl.core > xrpl.ledger
xrpl.core > xrpl.protocol
xrpl.json > xrpl.basics
xrpl.ledger > xrpl.basics
xrpl.ledger > xrpl.protocol
xrpl.net > xrpl.basics
xrpl.nodestore > xrpl.basics
xrpl.nodestore > xrpl.protocol
xrpl.protocol > xrpl.basics
xrpl.protocol > xrpl.json
xrpl.rdb > xrpl.basics
xrpl.rdb > xrpl.core
xrpl.rdb > xrpl.protocol
xrpl.resource > xrpl.basics
xrpl.resource > xrpl.json
xrpl.resource > xrpl.protocol
xrpl.server > xrpl.basics
xrpl.server > xrpl.core
xrpl.server > xrpl.json
xrpl.server > xrpl.protocol
xrpl.server > xrpl.rdb
xrpl.shamap > xrpl.basics
xrpl.shamap > xrpl.nodestore
xrpl.shamap > xrpl.protocol
xrpld.app > test.unit_test
xrpld.app > xrpl.basics
xrpld.app > xrpl.core
xrpld.app > xrpld.conditions
xrpld.app > xrpld.consensus
|
||||
xrpld.app > xrpld.core
|
||||
xrpld.app > xrpl.json
|
||||
xrpld.app > xrpl.ledger
|
||||
xrpld.app > xrpl.net
|
||||
xrpld.app > xrpl.nodestore
|
||||
xrpld.app > xrpl.protocol
|
||||
xrpld.app > xrpl.rdb
|
||||
xrpld.app > xrpl.resource
|
||||
xrpld.app > xrpl.server
|
||||
xrpld.app > xrpl.shamap
|
||||
xrpld.conditions > xrpl.basics
|
||||
xrpld.conditions > xrpl.protocol
|
||||
xrpld.consensus > xrpl.basics
|
||||
xrpld.consensus > xrpl.json
|
||||
xrpld.consensus > xrpl.protocol
|
||||
xrpld.core > xrpl.basics
|
||||
xrpld.core > xrpl.core
|
||||
xrpld.core > xrpl.json
|
||||
xrpld.core > xrpl.net
|
||||
xrpld.core > xrpl.protocol
|
||||
xrpld.core > xrpl.rdb
|
||||
xrpld.overlay > xrpl.basics
|
||||
xrpld.overlay > xrpl.core
|
||||
xrpld.overlay > xrpld.core
|
||||
xrpld.overlay > xrpld.peerfinder
|
||||
xrpld.overlay > xrpl.json
|
||||
xrpld.overlay > xrpl.protocol
|
||||
xrpld.overlay > xrpl.rdb
|
||||
xrpld.overlay > xrpl.resource
|
||||
xrpld.overlay > xrpl.server
|
||||
xrpld.peerfinder > xrpl.basics
|
||||
xrpld.peerfinder > xrpld.core
|
||||
xrpld.peerfinder > xrpl.protocol
|
||||
xrpld.peerfinder > xrpl.rdb
|
||||
xrpld.perflog > xrpl.basics
|
||||
xrpld.perflog > xrpl.core
|
||||
xrpld.perflog > xrpld.rpc
|
||||
xrpld.perflog > xrpl.json
|
||||
xrpld.rpc > xrpl.basics
|
||||
xrpld.rpc > xrpl.core
|
||||
xrpld.rpc > xrpld.core
|
||||
xrpld.rpc > xrpl.json
|
||||
xrpld.rpc > xrpl.ledger
|
||||
xrpld.rpc > xrpl.net
|
||||
xrpld.rpc > xrpl.nodestore
|
||||
xrpld.rpc > xrpl.protocol
|
||||
xrpld.rpc > xrpl.rdb
|
||||
xrpld.rpc > xrpl.resource
|
||||
xrpld.rpc > xrpl.server
|
||||
xrpld.shamap > xrpl.shamap
|
||||
47 .github/scripts/rename/README.md vendored Normal file
@@ -0,0 +1,47 @@
## Renaming ripple(d) to xrpl(d)

In the initial phases of development of the XRPL, the open source codebase was
called "rippled", and it retains that name even today. Today, over 1000 nodes
run the application, and code contributions have been submitted by developers
located around the world. The XRPL community is larger than ever. In light of
the decentralized and diversified nature of XRPL, we will rename any references
to `ripple` and `rippled` to `xrpl` and `xrpld`, when appropriate.

See [here](https://xls.xrpl.org/xls/XLS-0095-rename-rippled-to-xrpld.html) for
more information.

### Scripts

To facilitate this transition, there will be multiple scripts that developers
can run on their own PRs and forks to minimize conflicts. Each script should be
run from the repository root.

1. `.github/scripts/rename/definitions.sh`: This script will rename all
   definitions, such as include guards, from `RIPPLE_XXX` and `RIPPLED_XXX` to
   `XRPL_XXX`.
2. `.github/scripts/rename/copyright.sh`: This script will remove superfluous
   copyright notices.
3. `.github/scripts/rename/cmake.sh`: This script will rename all CMake files
   from `RippleXXX.cmake` or `RippledXXX.cmake` to `XrplXXX.cmake`, and any
   references to `ripple` and `rippled` (with or without capital letters) to
   `xrpl` and `xrpld`, respectively. The name of the binary will remain as-is,
   and will only be renamed to `xrpld` by a later script.
4. `.github/scripts/rename/binary.sh`: This script will rename the binary from
   `rippled` to `xrpld`, and reverse the symlink so that `rippled` points to
   the `xrpld` binary.
5. `.github/scripts/rename/namespace.sh`: This script will rename the C++
   namespaces from `ripple` to `xrpl`.
6. `.github/scripts/rename/config.sh`: This script will rename the config from
   `rippled.cfg` to `xrpld.cfg`, and update the code accordingly. The old
   filename will still be accepted.

You can run all these scripts from the repository root as follows:

```shell
./.github/scripts/rename/definitions.sh .
./.github/scripts/rename/copyright.sh .
./.github/scripts/rename/cmake.sh .
./.github/scripts/rename/binary.sh .
./.github/scripts/rename/namespace.sh .
./.github/scripts/rename/config.sh .
```
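After running the scripts, a quick spot check can confirm that no legacy names remain. This is a hypothetical verification step, not part of the scripts above; the grep patterns are illustrative only:

```shell
# Look for leftover RIPPLE_/RIPPLED_ definitions (should print nothing).
git grep -nE '#(ifndef|define|ifdef) +(RIPPLE|RIPPLED)_' -- include src || true

# Look for leftover `namespace ripple` declarations (should print nothing).
git grep -n 'namespace ripple' -- include src tests || true
```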
54 .github/scripts/rename/binary.sh vendored Executable file
@@ -0,0 +1,54 @@
#!/bin/bash

# Exit the script as soon as an error occurs.
set -e

# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script changes the binary name from `rippled` to `xrpld`, and reverses
# the symlink that currently points from `xrpld` to `rippled` so that it points
# from `rippled` to `xrpld` instead.
# Usage: .github/scripts/rename/binary.sh <repository directory>

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <repository directory>"
    exit 1
fi

DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
    echo "Error: Directory '${DIRECTORY}' does not exist."
    exit 1
fi
pushd "${DIRECTORY}"

# Remove the binary name override added by the cmake.sh script.
${SED_COMMAND} -z -i -E 's@\s+# For the time being.+"rippled"\)@@' cmake/XrplCore.cmake

# Reverse the symlink.
${SED_COMMAND} -i -E 's@create_symbolic_link\(rippled@create_symbolic_link(xrpld@' cmake/XrplInstall.cmake
${SED_COMMAND} -i -E 's@/xrpld\$\{suffix\}@/rippled${suffix}@' cmake/XrplInstall.cmake

# Rename references to the binary.
${SED_COMMAND} -i -E 's@rippled@xrpld@g' BUILD.md
${SED_COMMAND} -i -E 's@rippled@xrpld@g' CONTRIBUTING.md
${SED_COMMAND} -i -E 's@rippled@xrpld@g' .github/ISSUE_TEMPLATE/bug_report.md

# Restore and/or fix certain renames. The pre-commit hook will update the
# formatting upon saving/committing.
${SED_COMMAND} -i -E 's@ripple/xrpld@XRPLF/rippled@g' BUILD.md
${SED_COMMAND} -i -E 's@XRPLF/xrpld@XRPLF/rippled@g' BUILD.md
${SED_COMMAND} -i -E 's@xrpld \(`xrpld`\)@xrpld@g' BUILD.md
${SED_COMMAND} -i -E 's@XRPLF/xrpld@XRPLF/rippled@g' CONTRIBUTING.md

popd
echo "Processing complete."
92 .github/scripts/rename/cmake.sh vendored Executable file
@@ -0,0 +1,92 @@
#!/bin/bash

# Exit the script as soon as an error occurs.
set -e

# On MacOS, ensure that GNU sed and head are installed and available as `gsed`
# and `ghead`, respectively.
SED_COMMAND=sed
HEAD_COMMAND=head
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
    if ! command -v ghead &> /dev/null; then
        echo "Error: ghead is not installed. Please install it using 'brew install coreutils'."
        exit 1
    fi
    HEAD_COMMAND=ghead
fi

# This script renames CMake files from `RippleXXX.cmake` or `RippledXXX.cmake`
# to `XrplXXX.cmake`, and any references to `ripple` and `rippled` (with or
# without capital letters) to `xrpl` and `xrpld`, respectively. The name of the
# binary will remain as-is, and will only be renamed to `xrpld` in a different
# script, but the proto file will be renamed.
# Usage: .github/scripts/rename/cmake.sh <repository directory>

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <repository directory>"
    exit 1
fi

DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
    echo "Error: Directory '${DIRECTORY}' does not exist."
    exit 1
fi
pushd "${DIRECTORY}"

# Rename the files.
find cmake -type f -name 'Rippled*.cmake' -exec bash -c 'mv "${1}" "${1/Rippled/Xrpl}"' - {} \;
find cmake -type f -name 'Ripple*.cmake' -exec bash -c 'mv "${1}" "${1/Ripple/Xrpl}"' - {} \;
if [ -e cmake/xrpl_add_test.cmake ]; then
    mv cmake/xrpl_add_test.cmake cmake/XrplAddTest.cmake
fi
if [ -e include/xrpl/proto/ripple.proto ]; then
    mv include/xrpl/proto/ripple.proto include/xrpl/proto/xrpl.proto
fi

# Rename inside the files.
find cmake -type f -name '*.cmake' | while read -r FILE; do
    echo "Processing file: ${FILE}"
    ${SED_COMMAND} -i 's/Rippled/Xrpld/g' "${FILE}"
    ${SED_COMMAND} -i 's/Ripple/Xrpl/g' "${FILE}"
    ${SED_COMMAND} -i 's/rippled/xrpld/g' "${FILE}"
    ${SED_COMMAND} -i 's/ripple/xrpl/g' "${FILE}"
done
${SED_COMMAND} -i -E 's/Rippled?/Xrpl/g' CMakeLists.txt
${SED_COMMAND} -i 's/ripple/xrpl/g' CMakeLists.txt
${SED_COMMAND} -i 's/include(xrpl_add_test)/include(XrplAddTest)/' src/tests/libxrpl/CMakeLists.txt
${SED_COMMAND} -i 's/ripple.pb.h/xrpl.pb.h/' include/xrpl/protocol/messages.h
${SED_COMMAND} -i 's/ripple.pb.h/xrpl.pb.h/' BUILD.md

# Restore the name of the validator keys repository.
${SED_COMMAND} -i 's@xrpl/validator-keys-tool@ripple/validator-keys-tool@' cmake/XrplValidatorKeys.cmake

# Ensure the name of the binary and config remain 'rippled' for now.
${SED_COMMAND} -i -E 's/xrpld(-example)?\.cfg/rippled\1.cfg/g' cmake/XrplInstall.cmake
if grep -q '"xrpld"' cmake/XrplCore.cmake; then
    # The script has been rerun, so just restore the name of the binary.
    ${SED_COMMAND} -i 's/"xrpld"/"rippled"/' cmake/XrplCore.cmake
elif ! grep -q '"rippled"' cmake/XrplCore.cmake; then
    ${HEAD_COMMAND} -n -1 cmake/XrplCore.cmake > cmake.tmp
    echo ' # For the time being, we will keep the name of the binary as it was.' >> cmake.tmp
    echo ' set_target_properties(xrpld PROPERTIES OUTPUT_NAME "rippled")' >> cmake.tmp
    tail -1 cmake/XrplCore.cmake >> cmake.tmp
    mv cmake.tmp cmake/XrplCore.cmake
fi

# Restore the symlink from 'xrpld' to 'rippled'.
${SED_COMMAND} -i -E 's@create_symbolic_link\(xrpld@create_symbolic_link(rippled@' cmake/XrplInstall.cmake

# Remove the symlink that previously pointed from 'ripple' to 'xrpl' but is no
# longer needed.
${SED_COMMAND} -z -i -E 's@install\(CODE.+CMAKE_INSTALL_INCLUDEDIR}/xrpl\)\n"\)\n+@@' cmake/XrplInstall.cmake

popd
echo "Renaming complete."
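The file renames above rely on bash's pattern substitution, `${parameter/pattern/string}`, which rewrites only the first match. A minimal sketch with a made-up filename; note that the more specific `Rippled*` pass runs before the `Ripple*` pass so a `Rippled` prefix is never half-renamed:

```shell
# Illustrative filename, not necessarily one present in the repository.
f='cmake/RippledCore.cmake'
echo "${f/Rippled/Xrpl}"   # prints: cmake/XrplCore.cmake
```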
72 .github/scripts/rename/config.sh vendored Executable file
@@ -0,0 +1,72 @@
#!/bin/bash

# Exit the script as soon as an error occurs.
set -e

# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script renames the config from `rippled.cfg` to `xrpld.cfg`, and updates
# the code accordingly. The old filename will still be accepted.
# Usage: .github/scripts/rename/config.sh <repository directory>

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <repository directory>"
    exit 1
fi

DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
    echo "Error: Directory '${DIRECTORY}' does not exist."
    exit 1
fi
pushd "${DIRECTORY}"

# Add the xrpld.cfg to the .gitignore.
if ! grep -q 'xrpld.cfg' .gitignore; then
    ${SED_COMMAND} -i '/rippled.cfg/a\
/xrpld.cfg' .gitignore
fi

# Rename the files.
if [ -e rippled.cfg ]; then
    mv rippled.cfg xrpld.cfg
fi
if [ -e cfg/rippled-example.cfg ]; then
    mv cfg/rippled-example.cfg cfg/xrpld-example.cfg
fi

# Rename inside the files.
DIRECTORIES=("cfg" "cmake" "include" "src")
for DIRECTORY in "${DIRECTORIES[@]}"; do
    echo "Processing directory: ${DIRECTORY}"

    find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.cmake" -o -name "*.txt" -o -name "*.cfg" -o -name "*.md" \) | while read -r FILE; do
        echo "Processing file: ${FILE}"
        ${SED_COMMAND} -i -E 's/rippled(-example)?[ .]cfg/xrpld\1.cfg/g' "${FILE}"
    done
done
${SED_COMMAND} -i 's/rippled/xrpld/g' cfg/xrpld-example.cfg
${SED_COMMAND} -i 's/rippled/xrpld/g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's/ripplevalidators/xrplvalidators/g' src/test/core/Config_test.cpp # cspell: disable-line
${SED_COMMAND} -i 's/rippleConfig/xrpldConfig/g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's@ripple/@xrpld/@g' src/test/core/Config_test.cpp
${SED_COMMAND} -i 's/Rippled/File/g' src/test/core/Config_test.cpp

# Restore the old config file name in the code that maintains support for now.
${SED_COMMAND} -i 's/configLegacyName = "xrpld.cfg"/configLegacyName = "rippled.cfg"/g' src/xrpld/core/detail/Config.cpp

# Restore a URL.
${SED_COMMAND} -i 's/connect-your-xrpld-to-the-xrp-test-net.html/connect-your-rippled-to-the-xrp-test-net.html/g' cfg/xrpld-example.cfg

popd
echo "Renaming complete."
103 .github/scripts/rename/copyright.sh vendored Executable file
@@ -0,0 +1,103 @@
#!/bin/bash

# Exit the script as soon as an error occurs.
set -e

# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script removes superfluous copyright notices in source and header files
# in this project. Specifically, it removes all notices referencing Ripple,
# XRPLF, and certain individual contributors upon mutual agreement, so the one
# in the LICENSE.md file applies throughout. Copyright notices referencing
# external contributions, e.g. from Bitcoin, remain as-is.
# Usage: .github/scripts/rename/copyright.sh <repository directory>

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <repository directory>"
    exit 1
fi

DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
    echo "Error: Directory '${DIRECTORY}' does not exist."
    exit 1
fi
pushd "${DIRECTORY}"

# Prevent sed and echo from removing newlines and tabs in string literals by
# temporarily replacing them with placeholders. This only affects one file.
PLACEHOLDER_NEWLINE="__NEWLINE__"
PLACEHOLDER_TAB="__TAB__"
${SED_COMMAND} -i -E "s@\\\n@${PLACEHOLDER_NEWLINE}@g" src/test/rpc/ValidatorInfo_test.cpp
${SED_COMMAND} -i -E "s@\\\t@${PLACEHOLDER_TAB}@g" src/test/rpc/ValidatorInfo_test.cpp

# Process the include/ and src/ directories.
DIRECTORIES=("include" "src")
for DIRECTORY in "${DIRECTORIES[@]}"; do
    echo "Processing directory: ${DIRECTORY}"

    find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" -o -name "*.macro" \) | while read -r FILE; do
        echo "Processing file: ${FILE}"
        # Handle the cases where the copyright notice is enclosed in /* ... */
        # and usually surrounded by //---- and //======.
        ${SED_COMMAND} -z -i -E 's@^//-------+\n+@@' "${FILE}"
        ${SED_COMMAND} -z -i -E 's@^.*Copyright.+(Ripple|Bougalis|Falco|Hinnant|Null|Ritchford|XRPLF).+PERFORMANCE OF THIS SOFTWARE\.\n\*/\n+@@' "${FILE}" # cspell: ignore Bougalis Falco Hinnant Ritchford
        ${SED_COMMAND} -z -i -E 's@^//=======+\n+@@' "${FILE}"

        # Handle the cases where the copyright notice is commented out with //.
        ${SED_COMMAND} -z -i -E 's@^//\n// Copyright.+Falco \(vinnie dot falco at gmail dot com\)\n//\n+@@' "${FILE}" # cspell: ignore Vinnie Falco
    done
done

# Restore copyright notices that were removed from specific files, without
# restoring the verbiage that is already present in LICENSE.md. Ensure that if
# the script is run multiple times, duplicate notices are not added.
if ! grep -q 'Raw Material Software' include/xrpl/beast/core/CurrentThreadName.h; then
    echo -e "// Portions of this file are from JUCE (http://www.juce.com).\n// Copyright (c) 2013 - Raw Material Software Ltd.\n// Please visit http://www.juce.com\n\n$(cat include/xrpl/beast/core/CurrentThreadName.h)" > include/xrpl/beast/core/CurrentThreadName.h
fi
if ! grep -q 'Dev Null' src/test/app/NetworkID_test.cpp; then
    echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/app/NetworkID_test.cpp)" > src/test/app/NetworkID_test.cpp
fi
if ! grep -q 'Dev Null' src/test/app/tx/apply_test.cpp; then
    echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/app/tx/apply_test.cpp)" > src/test/app/tx/apply_test.cpp
fi
if ! grep -q 'Dev Null' src/test/rpc/ManifestRPC_test.cpp; then
    echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/rpc/ManifestRPC_test.cpp)" > src/test/rpc/ManifestRPC_test.cpp
fi
if ! grep -q 'Dev Null' src/test/rpc/ValidatorInfo_test.cpp; then
    echo -e "// Copyright (c) 2020 Dev Null Productions\n\n$(cat src/test/rpc/ValidatorInfo_test.cpp)" > src/test/rpc/ValidatorInfo_test.cpp
fi
if ! grep -q 'Dev Null' src/xrpld/rpc/handlers/DoManifest.cpp; then
    echo -e "// Copyright (c) 2019 Dev Null Productions\n\n$(cat src/xrpld/rpc/handlers/DoManifest.cpp)" > src/xrpld/rpc/handlers/DoManifest.cpp
fi
if ! grep -q 'Dev Null' src/xrpld/rpc/handlers/ValidatorInfo.cpp; then
    echo -e "// Copyright (c) 2019 Dev Null Productions\n\n$(cat src/xrpld/rpc/handlers/ValidatorInfo.cpp)" > src/xrpld/rpc/handlers/ValidatorInfo.cpp
fi
if ! grep -q 'Bougalis' include/xrpl/basics/SlabAllocator.h; then
    echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/SlabAllocator.h)" > include/xrpl/basics/SlabAllocator.h # cspell: ignore Nikolaos Bougalis nikb
fi
if ! grep -q 'Bougalis' include/xrpl/basics/spinlock.h; then
    echo -e "// Copyright (c) 2022, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/spinlock.h)" > include/xrpl/basics/spinlock.h # cspell: ignore Nikolaos Bougalis nikb
fi
if ! grep -q 'Bougalis' include/xrpl/basics/tagged_integer.h; then
    echo -e "// Copyright (c) 2014, Nikolaos D. Bougalis <nikb@bougalis.net>\n\n$(cat include/xrpl/basics/tagged_integer.h)" > include/xrpl/basics/tagged_integer.h # cspell: ignore Nikolaos Bougalis nikb
fi
if ! grep -q 'Ritchford' include/xrpl/beast/utility/Zero.h; then
    echo -e "// Copyright (c) 2014, Tom Ritchford <tom@swirly.com>\n\n$(cat include/xrpl/beast/utility/Zero.h)" > include/xrpl/beast/utility/Zero.h # cspell: ignore Ritchford
fi

# Restore newlines and tabs in string literals in the affected file.
${SED_COMMAND} -i -E "s@${PLACEHOLDER_NEWLINE}@\\\n@g" src/test/rpc/ValidatorInfo_test.cpp
${SED_COMMAND} -i -E "s@${PLACEHOLDER_TAB}@\\\t@g" src/test/rpc/ValidatorInfo_test.cpp

popd
echo "Removal complete."
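The placeholder trick above is easy to get wrong, so here is a minimal, self-contained sketch of the round trip on a throwaway file (GNU sed assumed; the sample text is made up):

```shell
# A C++ string literal containing the two-character sequences \t and \n.
printf '%s\n' 'auto msg = "col one\tcol two\nrow two";' > /tmp/sample.cpp

# Protect the escapes before the destructive edits...
sed -i -E 's@\\n@__NEWLINE__@g; s@\\t@__TAB__@g' /tmp/sample.cpp
cat /tmp/sample.cpp   # auto msg = "col one__TAB__col two__NEWLINE__row two";

# ...and restore them once those edits are done.
sed -i -E 's@__NEWLINE__@\\n@g; s@__TAB__@\\t@g' /tmp/sample.cpp
```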
42 .github/scripts/rename/definitions.sh vendored Executable file
@@ -0,0 +1,42 @@
#!/bin/bash

# Exit the script as soon as an error occurs.
set -e

# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script renames definitions, such as include guards, in this project.
# Specifically, it renames "RIPPLED_XXX" and "RIPPLE_XXX" to "XRPL_XXX" by
# scanning all cmake, header, and source files in the specified directory and
# its subdirectories.
# Usage: .github/scripts/rename/definitions.sh <repository directory>

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <repository directory>"
    exit 1
fi

DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
    echo "Error: Directory '${DIRECTORY}' does not exist."
    exit 1
fi

find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" \) | while read -r FILE; do
    echo "Processing file: ${FILE}"
    ${SED_COMMAND} -i -E 's@#(define|endif|if|ifdef|ifndef)(.*)(RIPPLED_|RIPPLE_)([A-Z0-9_]+)@#\1\2XRPL_\4@g' "${FILE}"
done
find "${DIRECTORY}" -type f \( -name "*.cmake" -o -name "*.txt" \) | while read -r FILE; do
    echo "Processing file: ${FILE}"
    ${SED_COMMAND} -i -E 's@(RIPPLED_|RIPPLE_)([A-Z0-9_]+)@XRPL_\2@g' "${FILE}"
done
echo "Renaming complete."
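To see what the first regex does, pipe a sample line through it; the capture groups keep the preprocessor directive and anything between it and the identifier intact, and only rewrite the prefix. The guard name below is made up for illustration:

```shell
echo '#ifndef RIPPLE_BASICS_SLAB_ALLOCATOR_H_INCLUDED' |
    sed -E 's@#(define|endif|if|ifdef|ifndef)(.*)(RIPPLED_|RIPPLE_)([A-Z0-9_]+)@#\1\2XRPL_\4@g'
# prints: #ifndef XRPL_BASICS_SLAB_ALLOCATOR_H_INCLUDED
```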
30 .github/scripts/rename/include.sh vendored Executable file
@@ -0,0 +1,30 @@
#!/bin/bash

# Exit the script as soon as an error occurs.
set -e

# This script checks that a new PR introduces no new include guards, as header
# files should use "#pragma once" instead. The script assumes any include
# guards use "XRPL_" as prefix.
# Usage: .github/scripts/rename/include.sh <repository directory>

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <repository directory>"
    exit 1
fi

DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
    echo "Error: Directory '${DIRECTORY}' does not exist."
    exit 1
fi

find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" \) | while read -r FILE; do
    echo "Processing file: ${FILE}"
    if grep -q "#ifndef XRPL_" "${FILE}"; then
        echo "Please replace all include guards with #pragma once."
        exit 1
    fi
done
echo "Checking complete."
58 .github/scripts/rename/namespace.sh vendored Executable file
@@ -0,0 +1,58 @@
#!/bin/bash

# Exit the script as soon as an error occurs.
set -e

# On MacOS, ensure that GNU sed is installed and available as `gsed`.
SED_COMMAND=sed
if [[ "${OSTYPE}" == 'darwin'* ]]; then
    if ! command -v gsed &> /dev/null; then
        echo "Error: gsed is not installed. Please install it using 'brew install gnu-sed'."
        exit 1
    fi
    SED_COMMAND=gsed
fi

# This script renames the `ripple` namespace to `xrpl` in this project.
# Specifically, it renames all occurrences of `namespace ripple` and `ripple::`
# to `namespace xrpl` and `xrpl::`, respectively, by scanning all header and
# source files in the specified directory and its subdirectories, as well as any
# occurrences in the documentation. It also renames them in the test suites.
# Usage: .github/scripts/rename/namespace.sh <repository directory>

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <repository directory>"
    exit 1
fi

DIRECTORY=$1
echo "Processing directory: ${DIRECTORY}"
if [ ! -d "${DIRECTORY}" ]; then
    echo "Error: Directory '${DIRECTORY}' does not exist."
    exit 1
fi
pushd "${DIRECTORY}"

DIRECTORIES=("include" "src" "tests")
for DIRECTORY in "${DIRECTORIES[@]}"; do
    echo "Processing directory: ${DIRECTORY}"

    find "${DIRECTORY}" -type f \( -name "*.h" -o -name "*.hpp" -o -name "*.ipp" -o -name "*.cpp" \) | while read -r FILE; do
        echo "Processing file: ${FILE}"
        ${SED_COMMAND} -i 's/namespace ripple/namespace xrpl/g' "${FILE}"
        ${SED_COMMAND} -i 's/ripple::/xrpl::/g' "${FILE}"
        ${SED_COMMAND} -i -E 's/(BEAST_DEFINE_TESTSUITE.+)ripple(.+)/\1xrpl\2/g' "${FILE}"
    done
done

# Special case for NuDBFactory, which has ripple twice in the test suite name.
${SED_COMMAND} -i -E 's/(BEAST_DEFINE_TESTSUITE.+)ripple(.+)/\1xrpl\2/g' src/test/nodestore/NuDBFactory_test.cpp

DIRECTORY=$1
find "${DIRECTORY}" -type f -name "*.md" | while read -r FILE; do
    echo "Processing file: ${FILE}"
    ${SED_COMMAND} -i 's/ripple::/xrpl::/g' "${FILE}"
done

popd
echo "Renaming complete."
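The test-suite rename uses a greedy regex, so each pass replaces only the last `ripple` on a line; that is why NuDBFactory, whose suite name contains it twice, needs the extra pass above. A sketch with an illustrative macro line (not copied from the repository):

```shell
line='BEAST_DEFINE_TESTSUITE(NuDBFactory, ripple_core, ripple);'
echo "$line" | sed -E 's/(BEAST_DEFINE_TESTSUITE.+)ripple(.+)/\1xrpl\2/g'
# prints: BEAST_DEFINE_TESTSUITE(NuDBFactory, ripple_core, xrpl);
# A second application then rewrites the remaining occurrence:
#         BEAST_DEFINE_TESTSUITE(NuDBFactory, xrpl_core, xrpl);
```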
333 .github/scripts/strategy-matrix/generate.py vendored Executable file
@@ -0,0 +1,333 @@
#!/usr/bin/env python3
import argparse
import itertools
import json
from dataclasses import dataclass
from pathlib import Path

THIS_DIR = Path(__file__).parent.resolve()


@dataclass
class Config:
    architecture: list[dict]
    os: list[dict]
    build_type: list[str]
    cmake_args: list[str]


"""
Generate a strategy matrix for GitHub Actions CI.

On each PR commit we will build a selection of Debian, RHEL, Ubuntu, MacOS, and
Windows configurations, while upon merge into the develop or release branches,
we will build all configurations, and test most of them.

We will further set additional CMake arguments as follows:
- All builds will have the `tests`, `werr`, and `xrpld` options.
- All builds will have the `wextra` option except for GCC 12 and Clang 16.
- All release builds will have the `assert` option.
- Certain Debian Bookworm configurations will change the reference fee, enable
  codecov, and enable voidstar in PRs.
"""


def generate_strategy_matrix(all: bool, config: Config) -> list:
    configurations = []
    for architecture, os, build_type, cmake_args in itertools.product(
        config.architecture, config.os, config.build_type, config.cmake_args
    ):
        # The default CMake target is 'all' for Linux and MacOS and 'install'
        # for Windows, but it can get overridden for certain configurations.
        cmake_target = "install" if os["distro_name"] == "windows" else "all"

        # We build and test all configurations by default, except for Windows
        # in Debug, because it is too slow, as well as when code coverage is
        # enabled, as that mode already runs the tests.
        build_only = False
        if os["distro_name"] == "windows" and build_type == "Debug":
            build_only = True

        # Only generate a subset of configurations in PRs.
        if not all:
            # Debian:
            # - Bookworm using GCC 13: Release on linux/amd64, set the reference
            #   fee to 500.
            # - Bookworm using GCC 15: Debug on linux/amd64, enable code
            #   coverage (which will be done below).
            # - Bookworm using Clang 16: Debug on linux/arm64, enable voidstar.
            # - Bookworm using Clang 17: Release on linux/amd64, set the
            #   reference fee to 1000.
            # - Bookworm using Clang 20: Debug on linux/amd64.
            if os["distro_name"] == "debian":
                skip = True
                if os["distro_version"] == "bookworm":
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-13"
                        and build_type == "Release"
                        and architecture["platform"] == "linux/amd64"
                    ):
                        cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=500 {cmake_args}"
                        skip = False
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
                        and build_type == "Debug"
                        and architecture["platform"] == "linux/amd64"
                    ):
                        skip = False
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "clang-16"
                        and build_type == "Debug"
                        and architecture["platform"] == "linux/arm64"
                    ):
                        cmake_args = f"-Dvoidstar=ON {cmake_args}"
                        skip = False
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "clang-17"
                        and build_type == "Release"
                        and architecture["platform"] == "linux/amd64"
                    ):
                        cmake_args = f"-DUNIT_TEST_REFERENCE_FEE=1000 {cmake_args}"
                        skip = False
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
                        and build_type == "Debug"
                        and architecture["platform"] == "linux/amd64"
                    ):
                        skip = False
                if skip:
                    continue

            # RHEL:
            # - 9 using GCC 12: Debug on linux/amd64.
            # - 10 using Clang: Release on linux/amd64.
            if os["distro_name"] == "rhel":
                skip = True
                if os["distro_version"] == "9":
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
                        and build_type == "Debug"
                        and architecture["platform"] == "linux/amd64"
                    ):
                        skip = False
                elif os["distro_version"] == "10":
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "clang-any"
                        and build_type == "Release"
                        and architecture["platform"] == "linux/amd64"
                    ):
                        skip = False
                if skip:
                    continue

            # Ubuntu:
            # - Jammy using GCC 12: Debug on linux/arm64.
            # - Noble using GCC 14: Release on linux/amd64.
            # - Noble using Clang 18: Debug on linux/amd64.
            # - Noble using Clang 19: Release on linux/arm64.
            if os["distro_name"] == "ubuntu":
                skip = True
                if os["distro_version"] == "jammy":
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
                        and build_type == "Debug"
                        and architecture["platform"] == "linux/arm64"
                    ):
                        skip = False
                elif os["distro_version"] == "noble":
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-14"
                        and build_type == "Release"
                        and architecture["platform"] == "linux/amd64"
                    ):
                        skip = False
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "clang-18"
                        and build_type == "Debug"
                        and architecture["platform"] == "linux/amd64"
                    ):
                        skip = False
                    if (
                        f"{os['compiler_name']}-{os['compiler_version']}" == "clang-19"
                        and build_type == "Release"
                        and architecture["platform"] == "linux/arm64"
                    ):
                        skip = False
                if skip:
                    continue

            # MacOS:
            # - Debug on macos/arm64.
            if os["distro_name"] == "macos" and not (
                build_type == "Debug" and architecture["platform"] == "macos/arm64"
            ):
                continue

            # Windows:
            # - Release on windows/amd64.
            if os["distro_name"] == "windows" and not (
                build_type == "Release" and architecture["platform"] == "windows/amd64"
            ):
                continue

        # Additional CMake arguments.
        cmake_args = f"{cmake_args} -Dtests=ON -Dwerr=ON -Dxrpld=ON"
        if f"{os['compiler_name']}-{os['compiler_version']}" not in [
            "gcc-12",
            "clang-16",
        ]:
            cmake_args = f"{cmake_args} -Dwextra=ON"
        if build_type == "Release":
            cmake_args = f"{cmake_args} -Dassert=ON"

        # We skip all RHEL on arm64 due to a build failure that needs further
        # investigation.
        if os["distro_name"] == "rhel" and architecture["platform"] == "linux/arm64":
            continue

        # We skip all Clang 20+ on arm64 due to a Boost build error.
        if (
            f"{os['compiler_name']}-{os['compiler_version']}"
            in ["clang-20", "clang-21"]
            and architecture["platform"] == "linux/arm64"
        ):
            continue

        # Enable code coverage for Debian Bookworm using GCC 15 in Debug on
        # linux/amd64.
        if (
            f"{os['distro_name']}-{os['distro_version']}" == "debian-bookworm"
            and f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-15"
            and build_type == "Debug"
            and architecture["platform"] == "linux/amd64"
        ):
            cmake_args = f"{cmake_args} -Dcoverage=ON -Dcoverage_format=xml -DCODE_COVERAGE_VERBOSE=ON -DCMAKE_C_FLAGS=-O0 -DCMAKE_CXX_FLAGS=-O0"

        # Enable unity build for Ubuntu Jammy using GCC 12 in Debug on
        # linux/amd64.
        if (
            f"{os['distro_name']}-{os['distro_version']}" == "ubuntu-jammy"
            and f"{os['compiler_name']}-{os['compiler_version']}" == "gcc-12"
            and build_type == "Debug"
            and architecture["platform"] == "linux/amd64"
        ):
            cmake_args = f"{cmake_args} -Dunity=ON"

        # Generate a unique name for the configuration, e.g. macos-arm64-debug
        # or debian-bookworm-gcc-12-amd64-release.
        config_name = os["distro_name"]
        if (n := os["distro_version"]) != "":
            config_name += f"-{n}"
        if (n := os["compiler_name"]) != "":
            config_name += f"-{n}"
        if (n := os["compiler_version"]) != "":
            config_name += f"-{n}"
        config_name += (
            f"-{architecture['platform'][architecture['platform'].find('/')+1:]}"
        )
        config_name += f"-{build_type.lower()}"
        if "-Dcoverage=ON" in cmake_args:
            config_name += "-coverage"
        if "-Dunity=ON" in cmake_args:
            config_name += "-unity"

        # Add the configuration to the list, with the most unique fields first,
        # so that they are easier to identify in the GitHub Actions UI, as long
        # names get truncated.
        # Add Address and Thread (both coupled with UB) sanitizers for specific
        # Bookworm configurations. The GCC ASan rippled-embedded tests are
        # failing because of https://github.com/google/sanitizers/issues/856.
        if (
            os["distro_version"] == "bookworm"
            and f"{os['compiler_name']}-{os['compiler_version']}" == "clang-20"
        ):
            # Add ASAN + UBSAN configuration.
            configurations.append(
                {
                    "config_name": config_name + "-asan-ubsan",
                    "cmake_args": cmake_args,
                    "cmake_target": cmake_target,
                    "build_only": build_only,
                    "build_type": build_type,
                    "os": os,
                    "architecture": architecture,
                    "sanitizers": "address,undefinedbehavior",
                }
            )
            # TSAN is deactivated due to seg faults with latest compilers.
            activate_tsan = False
            if activate_tsan:
                configurations.append(
                    {
                        "config_name": config_name + "-tsan-ubsan",
                        "cmake_args": cmake_args,
                        "cmake_target": cmake_target,
                        "build_only": build_only,
                        "build_type": build_type,
                        "os": os,
                        "architecture": architecture,
                        "sanitizers": "thread,undefinedbehavior",
                    }
                )
        else:
            configurations.append(
                {
                    "config_name": config_name,
                    "cmake_args": cmake_args,
                    "cmake_target": cmake_target,
                    "build_only": build_only,
                    "build_type": build_type,
                    "os": os,
                    "architecture": architecture,
                    "sanitizers": "",
                }
            )

    return configurations


def read_config(file: Path) -> Config:
    config = json.loads(file.read_text())
    if (
        config["architecture"] is None
        or config["os"] is None
        or config["build_type"] is None
        or config["cmake_args"] is None
    ):
        raise Exception("Invalid configuration file.")

    return Config(**config)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-a",
        "--all",
        help="Set to generate all configurations (generally used when merging a PR) or leave unset to generate a subset of configurations (generally used when committing to a PR).",
        action="store_true",
    )
    parser.add_argument(
        "-c",
        "--config",
        help="Path to the JSON file containing the strategy matrix configurations.",
        required=False,
        type=Path,
    )
    args = parser.parse_args()

    matrix = []
    if args.config is None or args.config == "":
        matrix += generate_strategy_matrix(
            args.all, read_config(THIS_DIR / "linux.json")
        )
        matrix += generate_strategy_matrix(
            args.all, read_config(THIS_DIR / "macos.json")
        )
        matrix += generate_strategy_matrix(
            args.all, read_config(THIS_DIR / "windows.json")
        )
    else:
        matrix += generate_strategy_matrix(args.all, read_config(args.config))

    # Generate the strategy matrix.
    print(f"matrix={json.dumps({'include': matrix})}")
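The script emits a single `matrix=<json>` line on stdout, so a workflow step can run it and append the output to `$GITHUB_OUTPUT` for later use with `fromJSON`. A hypothetical invocation (the surrounding workflow wiring is not part of this diff):

```shell
# Generate the full matrix (as on merges); drop --all for the PR subset.
./.github/scripts/strategy-matrix/generate.py --all >> "${GITHUB_OUTPUT}"

# Or generate from a single platform file:
./.github/scripts/strategy-matrix/generate.py \
    --config .github/scripts/strategy-matrix/linux.json
```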
212 .github/scripts/strategy-matrix/linux.json vendored Normal file
@@ -0,0 +1,212 @@
{
  "architecture": [
    {
      "platform": "linux/amd64",
      "runner": ["self-hosted", "Linux", "X64", "heavy"]
    },
    {
      "platform": "linux/arm64",
      "runner": ["self-hosted", "Linux", "ARM64", "heavy-arm64"]
    }
  ],
  "os": [
    {
      "distro_name": "debian",
      "distro_version": "bookworm",
      "compiler_name": "gcc",
      "compiler_version": "12",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "bookworm",
      "compiler_name": "gcc",
      "compiler_version": "13",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "bookworm",
      "compiler_name": "gcc",
      "compiler_version": "14",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "bookworm",
      "compiler_name": "gcc",
      "compiler_version": "15",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "bookworm",
      "compiler_name": "clang",
      "compiler_version": "16",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "bookworm",
      "compiler_name": "clang",
      "compiler_version": "17",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "bookworm",
      "compiler_name": "clang",
      "compiler_version": "18",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "bookworm",
      "compiler_name": "clang",
      "compiler_version": "19",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "bookworm",
      "compiler_name": "clang",
      "compiler_version": "20",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "trixie",
      "compiler_name": "gcc",
      "compiler_version": "14",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "trixie",
      "compiler_name": "gcc",
      "compiler_version": "15",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "trixie",
      "compiler_name": "clang",
      "compiler_version": "20",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "debian",
      "distro_version": "trixie",
      "compiler_name": "clang",
      "compiler_version": "21",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "rhel",
      "distro_version": "8",
      "compiler_name": "gcc",
      "compiler_version": "14",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "rhel",
      "distro_version": "8",
      "compiler_name": "clang",
      "compiler_version": "any",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "rhel",
      "distro_version": "9",
      "compiler_name": "gcc",
      "compiler_version": "12",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "rhel",
      "distro_version": "9",
      "compiler_name": "gcc",
      "compiler_version": "13",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "rhel",
      "distro_version": "9",
      "compiler_name": "gcc",
      "compiler_version": "14",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "rhel",
      "distro_version": "9",
      "compiler_name": "clang",
      "compiler_version": "any",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "rhel",
      "distro_version": "10",
      "compiler_name": "gcc",
      "compiler_version": "14",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "rhel",
      "distro_version": "10",
      "compiler_name": "clang",
      "compiler_version": "any",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "ubuntu",
      "distro_version": "jammy",
      "compiler_name": "gcc",
      "compiler_version": "12",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "ubuntu",
      "distro_version": "noble",
      "compiler_name": "gcc",
      "compiler_version": "13",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "ubuntu",
      "distro_version": "noble",
      "compiler_name": "gcc",
      "compiler_version": "14",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "ubuntu",
      "distro_version": "noble",
      "compiler_name": "clang",
      "compiler_version": "16",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "ubuntu",
      "distro_version": "noble",
      "compiler_name": "clang",
      "compiler_version": "17",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "ubuntu",
      "distro_version": "noble",
      "compiler_name": "clang",
      "compiler_version": "18",
      "image_sha": "ab4d1f0"
    },
    {
      "distro_name": "ubuntu",
      "distro_version": "noble",
      "compiler_name": "clang",
      "compiler_version": "19",
      "image_sha": "ab4d1f0"
    }
  ],
  "build_type": ["Debug", "Release"],
  "cmake_args": [""]
}
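Since read_config() only verifies that the four top-level keys are present, a quick local check of an edited platform file can mirror that validation. A sketch assuming jq is installed:

```shell
# Exits non-zero if any key the generator indexes is missing or null.
jq -e '.architecture and .os and .build_type and .cmake_args' \
    .github/scripts/strategy-matrix/linux.json > /dev/null && echo "OK"
```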
19 .github/scripts/strategy-matrix/macos.json vendored Normal file
@@ -0,0 +1,19 @@
{
  "architecture": [
    {
      "platform": "macos/arm64",
      "runner": ["self-hosted", "macOS", "ARM64", "mac-runner-m1"]
    }
  ],
  "os": [
    {
      "distro_name": "macos",
      "distro_version": "",
      "compiler_name": "",
      "compiler_version": "",
      "image_sha": ""
    }
  ],
  "build_type": ["Debug", "Release"],
  "cmake_args": ["-DCMAKE_POLICY_VERSION_MINIMUM=3.5"]
}
19 .github/scripts/strategy-matrix/windows.json vendored Normal file
@@ -0,0 +1,19 @@
{
  "architecture": [
    {
      "platform": "windows/amd64",
      "runner": ["self-hosted", "Windows", "devbox"]
    }
  ],
  "os": [
    {
      "distro_name": "windows",
      "distro_version": "",
      "compiler_name": "",
      "compiler_version": "",
      "image_sha": ""
    }
  ],
  "build_type": ["Debug", "Release"],
  "cmake_args": [""]
}
61 .github/workflows/clang-format.yml vendored
@@ -1,61 +0,0 @@
name: clang-format

on: [push, pull_request]

jobs:
  check:
    runs-on: ubuntu-20.04
    env:
      CLANG_VERSION: 10
    steps:
      - uses: actions/checkout@v4
      - name: Install clang-format
        run: |
          codename=$( lsb_release --codename --short )
          sudo tee /etc/apt/sources.list.d/llvm.list >/dev/null <<EOF
          deb http://apt.llvm.org/${codename}/ llvm-toolchain-${codename}-${CLANG_VERSION} main
          deb-src http://apt.llvm.org/${codename}/ llvm-toolchain-${codename}-${CLANG_VERSION} main
          EOF
          wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add
          sudo apt-get update
          sudo apt-get install clang-format-${CLANG_VERSION}
      - name: Format src/ripple
        run: find src/ripple -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-${CLANG_VERSION} -i
      - name: Format src/test
        run: find src/test -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -print0 | xargs -0 clang-format-${CLANG_VERSION} -i
      - name: Check for differences
        id: assert
        run: |
          set -o pipefail
          git diff --exit-code | tee "clang-format.patch"
      - name: Upload patch
        if: failure() && steps.assert.outcome == 'failure'
        uses: actions/upload-artifact@v3
        continue-on-error: true
        with:
          name: clang-format.patch
          if-no-files-found: ignore
          path: clang-format.patch
      - name: What happened?
        if: failure() && steps.assert.outcome == 'failure'
        env:
          PREAMBLE: |
            If you are reading this, you are looking at a failed Github Actions
            job. That means you pushed one or more files that did not conform
            to the formatting specified in .clang-format. That may be because
            you neglected to run 'git clang-format' or 'clang-format' before
            committing, or that your version of clang-format has an
            incompatibility with the one on this machine, which is:
          SUGGESTION: |

            To fix it, you can do one of two things:
            1. Download and apply the patch generated as an artifact of this
               job to your repo, commit, and push.
            2. Run 'git-clang-format --extensions c,cpp,h,cxx,ipp develop'
               in your repo, commit, and push.
        run: |
          echo "${PREAMBLE}"
          clang-format-${CLANG_VERSION} --version
          echo "${SUGGESTION}"
          exit 1
36 .github/workflows/doxygen.yml vendored
@@ -1,36 +0,0 @@
name: Build and publish Doxygen documentation
# To test this workflow, push your changes to your fork's `develop` branch.
on:
  push:
    branches:
      - develop
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  job:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    container: rippleci/rippled-build-ubuntu:aaf5e3e
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: check environment
        run: |
          echo ${PATH} | tr ':' '\n'
          cmake --version
          doxygen --version
          env | sort
      - name: build
        run: |
          mkdir build
          cd build
          cmake -Donly_docs=TRUE ..
          cmake --build . --target docs --parallel $(nproc)
      - name: publish
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: build/docs/html
49 .github/workflows/levelization.yml vendored
@@ -1,49 +0,0 @@
name: levelization

on: [push, pull_request]

jobs:
  check:
    runs-on: ubuntu-latest
    env:
      CLANG_VERSION: 10
    steps:
      - uses: actions/checkout@v4
      - name: Check levelization
        run: Builds/levelization/levelization.sh
      - name: Check for differences
        id: assert
        run: |
          set -o pipefail
          git diff --exit-code | tee "levelization.patch"
      - name: Upload patch
        if: failure() && steps.assert.outcome == 'failure'
        uses: actions/upload-artifact@v3
        continue-on-error: true
        with:
          name: levelization.patch
          if-no-files-found: ignore
          path: levelization.patch
      - name: What happened?
        if: failure() && steps.assert.outcome == 'failure'
        env:
          MESSAGE: |
            If you are reading this, you are looking at a failed Github
            Actions job. That means you changed the dependency relationships
            between the modules in rippled. That may be an improvement or a
            regression. This check doesn't judge.

            A rule of thumb, though, is that if your changes caused
            something to be removed from loops.txt, that's probably an
            improvement. If something was added, it's probably a regression.

            To fix it, you can do one of two things:
            1. Download and apply the patch generated as an artifact of this
               job to your repo, commit, and push.
            2. Run './Builds/levelization/levelization.sh' in your repo,
               commit, and push.

            See Builds/levelization/README.md for more info.
        run: |
          echo "${MESSAGE}"
          exit 1
71 .github/workflows/macos.yml vendored
@@ -1,71 +0,0 @@
name: macos
on:
  pull_request:
  push:
    # If the branches list is ever changed, be sure to change it on all
    # build/test jobs (nix, macos, windows)
    branches:
      # Always build the package branches
      - develop
      - release
      - master
      # Branches that opt-in to running
      - 'ci/**'
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:

  test:
    strategy:
      matrix:
        platform:
          - macos
        generator:
          - Ninja
        configuration:
          - Release
    runs-on: [self-hosted, macOS]
    env:
      # The `build` action requires these variables.
      build_dir: .build
      NUM_PROCESSORS: 12
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: install Conan
        run: |
          brew install conan@1
          echo '/opt/homebrew/opt/conan@1/bin' >> $GITHUB_PATH
      - name: install Ninja
        if: matrix.generator == 'Ninja'
        run: brew install ninja
      - name: check environment
        run: |
          env | sort
          echo ${PATH} | tr ':' '\n'
          python --version
          conan --version
          cmake --version
      - name: configure Conan
        run: |
          conan profile new default --detect || true
          conan profile update settings.compiler.cppstd=20 default
          conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default
      - name: build dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
          CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
          CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
        with:
          configuration: ${{ matrix.configuration }}
      - name: build
        uses: ./.github/actions/build
        with:
          generator: ${{ matrix.generator }}
          configuration: ${{ matrix.configuration }}
      - name: test
        run: |
          ${build_dir}/rippled --unittest
234
.github/workflows/nix.yml
vendored
234
.github/workflows/nix.yml
vendored
@@ -1,234 +0,0 @@
name: nix
on:
  pull_request:
  push:
    # If the branches list is ever changed, be sure to change it on all
    # build/test jobs (nix, macos, windows)
    branches:
      # Always build the package branches
      - develop
      - release
      - master
      # Branches that opt-in to running
      - 'ci/**'
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

# This workflow has two job matrixes.
# They can be considered phases because the second matrix ("test")
# depends on the first ("dependencies").
#
# The first phase has a job in the matrix for each combination of
# variables that affects dependency ABI:
# platform, compiler, and configuration.
# It creates a GitHub artifact holding the Conan profile,
# and builds and caches binaries for all the dependencies.
# If an Artifactory remote is configured, they are cached there.
# If not, they are added to the GitHub artifact.
# GitHub's "cache" action has a size limit (10 GB) that is too small
# to hold the binaries if they are built locally.
# We must use the "{upload,download}-artifact" actions instead.
#
# The second phase has a job in the matrix for each test configuration.
# It installs dependency binaries from the cache, whichever was used,
# and builds and tests rippled.

jobs:

  dependencies:
    strategy:
      fail-fast: false
      matrix:
        platform:
          - linux
        compiler:
          - gcc
          - clang
        configuration:
          - Debug
          - Release
        include:
          - compiler: gcc
            profile:
              version: 11
              cc: /usr/bin/gcc
              cxx: /usr/bin/g++
          - compiler: clang
            profile:
              version: 14
              cc: /usr/bin/clang-14
              cxx: /usr/bin/clang++-14
    runs-on: [self-hosted, heavy]
    container: rippleci/rippled-build-ubuntu:aaf5e3e
    env:
      build_dir: .build
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: check environment
        run: |
          echo ${PATH} | tr ':' '\n'
          conan --version
          cmake --version
          env | sort
      - name: configure Conan
        run: |
          conan profile new default --detect
          conan profile update settings.compiler.cppstd=20 default
          conan profile update settings.compiler=${{ matrix.compiler }} default
          conan profile update settings.compiler.version=${{ matrix.profile.version }} default
          conan profile update settings.compiler.libcxx=libstdc++11 default
          conan profile update env.CC=${{ matrix.profile.cc }} default
          conan profile update env.CXX=${{ matrix.profile.cxx }} default
          conan profile update conf.tools.build:compiler_executables='{"c": "${{ matrix.profile.cc }}", "cpp": "${{ matrix.profile.cxx }}"}' default
      - name: archive profile
        # Create this archive before dependencies are added to the local cache.
        run: tar -czf conan.tar -C ~/.conan .
      - name: build dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
          CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
          CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
        with:
          configuration: ${{ matrix.configuration }}
      - name: upload archive
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
          path: conan.tar
          if-no-files-found: error


  test:
    strategy:
      fail-fast: false
      matrix:
        platform:
          - linux
        compiler:
          - gcc
          - clang
        configuration:
          - Debug
          - Release
        cmake-args:
          -
          - "-Dunity=ON"
    needs: dependencies
    runs-on: [self-hosted, heavy]
    container: rippleci/rippled-build-ubuntu:aaf5e3e
    env:
      build_dir: .build
    steps:
      - name: download cache
        uses: actions/download-artifact@v3
        with:
          name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
      - name: extract cache
        run: |
          mkdir -p ~/.conan
          tar -xzf conan.tar -C ~/.conan
      - name: check environment
        run: |
          env | sort
          echo ${PATH} | tr ':' '\n'
          conan --version
          cmake --version
      - name: checkout
        uses: actions/checkout@v4
      - name: dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        with:
          configuration: ${{ matrix.configuration }}
      - name: build
        uses: ./.github/actions/build
        with:
          generator: Ninja
          configuration: ${{ matrix.configuration }}
          cmake-args: ${{ matrix.cmake-args }}
      - name: test
        run: |
          ${build_dir}/rippled --unittest --unittest-jobs $(nproc)


  coverage:
    strategy:
      fail-fast: false
      matrix:
        platform:
          - linux
        compiler:
          - gcc
        configuration:
          - Debug
    needs: dependencies
    runs-on: [self-hosted, heavy]
    container: rippleci/rippled-build-ubuntu:aaf5e3e
    env:
      build_dir: .build
    steps:
      - name: download cache
        uses: actions/download-artifact@v3
        with:
          name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
      - name: extract cache
        run: |
          mkdir -p ~/.conan
          tar -xzf conan.tar -C ~/.conan
      - name: install gcovr
        run: pip install "gcovr>=7,<8"
      - name: check environment
        run: |
          echo ${PATH} | tr ':' '\n'
          conan --version
          cmake --version
          gcovr --version
          env | sort
          ls ~/.conan
      - name: checkout
        uses: actions/checkout@v4
      - name: dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        with:
          configuration: ${{ matrix.configuration }}
      - name: build
        uses: ./.github/actions/build
        with:
          generator: Ninja
          configuration: ${{ matrix.configuration }}
          cmake-args: >-
            -Dcoverage=ON
            -Dcoverage_format=xml
            -DCODE_COVERAGE_VERBOSE=ON
            -DCMAKE_CXX_FLAGS="-O0"
            -DCMAKE_C_FLAGS="-O0"
          cmake-target: coverage
      - name: move coverage report
        shell: bash
        run: |
          mv "${build_dir}/coverage.xml" ./
      - name: archive coverage report
        uses: actions/upload-artifact@v3
        with:
          name: coverage.xml
          path: coverage.xml
          retention-days: 30
      - name: upload coverage report
        uses: wandalen/wretry.action@v1.4.10
        with:
          action: codecov/codecov-action@v4.3.0
          with: |
            files: coverage.xml
            fail_ci_if_error: true
            disable_search: true
            verbose: true
            plugin: noop
            token: ${{ secrets.CODECOV_TOKEN }}
          attempt_limit: 5
          attempt_delay: 210000 # in milliseconds
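The comment block at the top of this deleted workflow explains why the Conan state moved between the two phases as a GitHub artifact rather than through the size-limited cache action. The round-trip performed by the "archive profile" and "extract cache" steps is just a tarball of the Conan home, runnable locally as a sketch:

```bash
# Phase 1 (dependencies): snapshot the whole local Conan home.
tar -czf conan.tar -C ~/.conan .

# Phase 2 (test/coverage): recreate the Conan home from the snapshot.
mkdir -p ~/.conan
tar -xzf conan.tar -C ~/.conan
```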
165 .github/workflows/on-pr.yml vendored Normal file
@@ -0,0 +1,165 @@
# This workflow runs all workflows to check, build and test the project on
# various Linux flavors, as well as on MacOS and Windows, on every push to a
# user branch. However, it will not run if the pull request is a draft unless it
# has the 'DraftRunCI' label. For commits to PRs that target a release branch,
# it also uploads the libxrpl recipe to the Conan remote.
name: PR

on:
  merge_group:
    types:
      - checks_requested
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  # This job determines whether the rest of the workflow should run. It runs
  # when the PR is not a draft (which should also cover merge-group) or
  # has the 'DraftRunCI' label.
  should-run:
    if: ${{ !github.event.pull_request.draft || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
      - name: Determine changed files
        # This step checks whether any files have changed that should
        # cause the next jobs to run. We do it this way rather than
        # using `paths` in the `on:` section, because all required
        # checks must pass, even for changes that do not modify anything
        # that affects those checks. We would therefore like to make the
        # checks required only if the job runs, but GitHub does not
        # support that directly. By always executing the workflow on new
        # commits and by using the changed-files action below, we ensure
        # that GitHub considers any skipped jobs to have passed, and in
        # turn the required checks as well.
        id: changes
        uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5
        with:
          files: |
            # These paths are unique to `on-pr.yml`.
            .github/scripts/levelization/**
            .github/scripts/rename/**
            .github/workflows/reusable-check-levelization.yml
            .github/workflows/reusable-check-rename.yml
            .github/workflows/on-pr.yml

            # Keep the paths below in sync with those in `on-trigger.yml`.
            .github/actions/build-deps/**
            .github/actions/build-test/**
            .github/actions/generate-version/**
            .github/actions/setup-conan/**
            .github/scripts/strategy-matrix/**
            .github/workflows/reusable-build.yml
            .github/workflows/reusable-build-test-config.yml
            .github/workflows/reusable-build-test.yml
            .github/workflows/reusable-strategy-matrix.yml
            .github/workflows/reusable-test.yml
            .github/workflows/reusable-upload-recipe.yml
            .codecov.yml
            cmake/**
            conan/**
            external/**
            include/**
            src/**
            tests/**
            CMakeLists.txt
            conanfile.py
            conan.lock
      - name: Check whether to run
        # This step determines whether the rest of the workflow should
        # run. The rest of the workflow will run if this job runs AND at
        # least one of:
        # * Any of the files checked in the `changes` step were modified
        # * The PR is NOT a draft and is labeled "Ready to merge"
        # * The workflow is running from the merge queue
        id: go
        env:
          FILES: ${{ steps.changes.outputs.any_changed }}
          DRAFT: ${{ github.event.pull_request.draft }}
          READY: ${{ contains(github.event.pull_request.labels.*.name, 'Ready to merge') }}
          MERGE: ${{ github.event_name == 'merge_group' }}
        run: |
          echo "go=${{ (env.DRAFT != 'true' && env.READY == 'true') || env.FILES == 'true' || env.MERGE == 'true' }}" >> "${GITHUB_OUTPUT}"
          cat "${GITHUB_OUTPUT}"
    outputs:
      go: ${{ steps.go.outputs.go == 'true' }}

  check-levelization:
    needs: should-run
    if: ${{ needs.should-run.outputs.go == 'true' }}
    uses: ./.github/workflows/reusable-check-levelization.yml

  check-rename:
    needs: should-run
    if: ${{ needs.should-run.outputs.go == 'true' }}
    uses: ./.github/workflows/reusable-check-rename.yml

  build-test:
    needs: should-run
    if: ${{ needs.should-run.outputs.go == 'true' }}
    uses: ./.github/workflows/reusable-build-test.yml
    strategy:
      fail-fast: false
      matrix:
        os: [linux, macos, windows]
    with:
      # Enable ccache only for events targeting the XRPLF repository, since
      # other accounts will not have access to our remote cache storage.
      ccache_enabled: ${{ github.repository_owner == 'XRPLF' }}
      os: ${{ matrix.os }}
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

  upload-recipe:
    needs:
      - should-run
      - build-test
    # Only run when committing to a PR that targets a release branch in the
    # XRPLF repository.
    if: ${{ github.repository_owner == 'XRPLF' && needs.should-run.outputs.go == 'true' && startsWith(github.ref, 'refs/heads/release') }}
    uses: ./.github/workflows/reusable-upload-recipe.yml
    secrets:
      remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
      remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}

  notify-clio:
    needs: upload-recipe
    runs-on: ubuntu-latest
    steps:
      # Notify the Clio repository about the newly proposed release version, so
      # it can be checked for compatibility before the release is actually made.
      - name: Notify Clio
        env:
          GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }}
          PR_URL: ${{ github.event.pull_request.html_url }}
        run: |
          gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
            -F "client_payload[ref]=${{ needs.upload-recipe.outputs.recipe_ref }}" \
            -F "client_payload[pr_url]=${PR_URL}"

  passed:
    if: failure() || cancelled()
    needs:
      - check-levelization
      - check-rename
      - build-test
      - upload-recipe
      - notify-clio
    runs-on: ubuntu-latest
    steps:
      - name: Fail
        run: false
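The `go` gate in `should-run` is a plain boolean over four strings. A minimal bash sketch of the same logic, handy for sanity-checking a combination of inputs (the values below are hypothetical):

```bash
FILES=false  # did any watched file change?
DRAFT=true   # is the PR a draft?
READY=false  # does it carry the "Ready to merge" label?
MERGE=false  # is this a merge-queue run?

# Mirrors: (DRAFT != 'true' && READY == 'true') || FILES == 'true' || MERGE == 'true'
if { [ "${DRAFT}" != "true" ] && [ "${READY}" = "true" ]; } ||
    [ "${FILES}" = "true" ] || [ "${MERGE}" = "true" ]; then
  echo "go=true"
else
  echo "go=false"
fi
```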
25 .github/workflows/on-tag.yml vendored Normal file
@@ -0,0 +1,25 @@
# This workflow uploads the libxrpl recipe to the Conan remote when a versioned
# tag is pushed.
name: Tag

on:
  push:
    tags:
      - "v*"

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  upload-recipe:
    # Only run when a tag is pushed to the XRPLF repository.
    if: ${{ github.repository_owner == 'XRPLF' }}
    uses: ./.github/workflows/reusable-upload-recipe.yml
    secrets:
      remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
      remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
88 .github/workflows/on-trigger.yml vendored Normal file
@@ -0,0 +1,88 @@
# This workflow runs all workflows to build and test the code on various Linux
# flavors, as well as on MacOS and Windows, on a scheduled basis, on merge into
# the 'develop' or 'release*' branches, or when requested manually. Upon pushes
# to the develop branch it also uploads the libxrpl recipe to the Conan remote.
name: Trigger

on:
  push:
    branches:
      - "develop"
      - "release*"
    paths:
      # These paths are unique to `on-trigger.yml`.
      - ".github/workflows/on-trigger.yml"

      # Keep the paths below in sync with those in `on-pr.yml`.
      - ".github/actions/build-deps/**"
      - ".github/actions/build-test/**"
      - ".github/actions/generate-version/**"
      - ".github/actions/setup-conan/**"
      - ".github/scripts/strategy-matrix/**"
      - ".github/workflows/reusable-build.yml"
      - ".github/workflows/reusable-build-test-config.yml"
      - ".github/workflows/reusable-build-test.yml"
      - ".github/workflows/reusable-strategy-matrix.yml"
      - ".github/workflows/reusable-test.yml"
      - ".github/workflows/reusable-upload-recipe.yml"
      - ".codecov.yml"
      - "cmake/**"
      - "conan/**"
      - "external/**"
      - "include/**"
      - "src/**"
      - "tests/**"
      - "CMakeLists.txt"
      - "conanfile.py"
      - "conan.lock"

  # Run at 06:32 UTC on every day of the week from Monday through Friday. This
  # will force all dependencies to be rebuilt, which is useful to verify that
  # all dependencies can be built successfully. Only the dependencies that
  # are actually missing from the remote will be uploaded.
  schedule:
    - cron: "32 6 * * 1-5"

  # Run when manually triggered via the GitHub UI or API.
  workflow_dispatch:

concurrency:
  # When a PR is merged into the develop branch it will be assigned a unique
  # group identifier, so execution will continue even if another PR is merged
  # while it is still running. In all other cases the group identifier is shared
  # per branch, so that any in-progress runs are cancelled when a new commit is
  # pushed.
  group: ${{ github.workflow }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/develop' && github.sha || github.ref }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  build-test:
    uses: ./.github/workflows/reusable-build-test.yml
    strategy:
      fail-fast: ${{ github.event_name == 'merge_group' }}
      matrix:
        os: [linux, macos, windows]
    with:
      # Enable ccache only for events targeting the XRPLF repository, since
      # other accounts will not have access to our remote cache storage.
      # However, we do not enable ccache for events targeting a release branch,
      # to protect against the rare case that the output produced by ccache is
      # not identical to a regular compilation.
      ccache_enabled: ${{ github.repository_owner == 'XRPLF' && !startsWith(github.ref, 'refs/heads/release') }}
      os: ${{ matrix.os }}
      strategy_matrix: ${{ github.event_name == 'schedule' && 'all' || 'minimal' }}
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}

  upload-recipe:
    needs: build-test
    # Only run when pushing to the develop branch in the XRPLF repository.
    if: ${{ github.repository_owner == 'XRPLF' && github.event_name == 'push' && github.ref == 'refs/heads/develop' }}
    uses: ./.github/workflows/reusable-upload-recipe.yml
    secrets:
      remote_username: ${{ secrets.CONAN_REMOTE_USERNAME }}
      remote_password: ${{ secrets.CONAN_REMOTE_PASSWORD }}
17 .github/workflows/pre-commit.yml vendored Normal file
@@ -0,0 +1,17 @@
name: Run pre-commit hooks

on:
  pull_request:
  push:
    branches:
      - "develop"
      - "release*"
  workflow_dispatch:

jobs:
  # Call the workflow in the XRPLF/actions repo that runs the pre-commit hooks.
  run-hooks:
    uses: XRPLF/actions/.github/workflows/pre-commit.yml@320be44621ca2a080f05aeb15817c44b84518108
    with:
      runs_on: ubuntu-latest
      container: '{ "image": "ghcr.io/xrplf/ci/tools-rippled-pre-commit:sha-ab4d1f0" }'
72 .github/workflows/publish-docs.yml vendored Normal file
@@ -0,0 +1,72 @@
# This workflow builds the documentation for the repository, and publishes it to
# GitHub Pages when changes are merged into the default branch.
name: Build and publish documentation

on:
  push:
    paths:
      - ".github/workflows/publish-docs.yml"
      - "*.md"
      - "**/*.md"
      - "docs/**"
      - "include/**"
      - "src/libxrpl/**"
      - "src/xrpld/**"

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash

env:
  BUILD_DIR: build
  NPROC_SUBTRACT: 2

jobs:
  publish:
    runs-on: ubuntu-latest
    container: ghcr.io/xrplf/ci/tools-rippled-documentation:sha-a8c7be1
    permissions:
      contents: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

      - name: Get number of processors
        uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
        id: nproc
        with:
          subtract: ${{ env.NPROC_SUBTRACT }}

      - name: Check configuration
        run: |
          echo 'Checking path.'
          echo ${PATH} | tr ':' '\n'

          echo 'Checking environment variables.'
          env | sort

          echo 'Checking CMake version.'
          cmake --version

          echo 'Checking Doxygen version.'
          doxygen --version

      - name: Build documentation
        env:
          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
        run: |
          mkdir -p "${BUILD_DIR}"
          cd "${BUILD_DIR}"
          cmake -Donly_docs=ON ..
          cmake --build . --target docs --parallel ${BUILD_NPROC}

      - name: Publish documentation
        if: ${{ github.ref_type == 'branch' && github.ref_name == github.event.repository.default_branch }}
        uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4.0.0
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ${{ env.BUILD_DIR }}/docs/html
265 .github/workflows/reusable-build-test-config.yml vendored Normal file
@@ -0,0 +1,265 @@
name: Build and test configuration

on:
  workflow_call:
    inputs:
      build_only:
        description: 'Whether to only build or to build and test the code ("true", "false").'
        required: true
        type: boolean

      build_type:
        description: 'The build type to use ("Debug", "Release").'
        required: true
        type: string

      ccache_enabled:
        description: "Whether to enable ccache."
        required: false
        type: boolean
        default: false

      cmake_args:
        description: "Additional arguments to pass to CMake."
        required: false
        type: string
        default: ""

      cmake_target:
        description: "The CMake target to build."
        required: true
        type: string

      runs_on:
        description: Runner to run the job on as a JSON string
        required: true
        type: string

      image:
        description: "The image to run in (leave empty to run natively)"
        required: true
        type: string

      config_name:
        description: "The configuration string (used for naming artifacts and such)."
        required: true
        type: string

      nproc_subtract:
        description: "The number of processors to subtract when calculating parallelism."
        required: false
        type: number
        default: 2

      sanitizers:
        description: "The sanitizers to enable."
        required: false
        type: string
        default: ""

    secrets:
      CODECOV_TOKEN:
        description: "The Codecov token to use for uploading coverage reports."
        required: true

defaults:
  run:
    shell: bash

env:
  # Conan installs the generators in the build/generators directory, see the
  # layout() method in conanfile.py. We then run CMake from the build directory.
  BUILD_DIR: build

jobs:
  build-and-test:
    name: ${{ inputs.config_name }}
    runs-on: ${{ fromJSON(inputs.runs_on) }}
    container: ${{ inputs.image != '' && inputs.image || null }}
    timeout-minutes: 60
    env:
      # Use a namespace to keep the objects separate for each configuration.
      CCACHE_NAMESPACE: ${{ inputs.config_name }}
      # Ccache supports both Redis and HTTP endpoints.
      # * For Redis, use the following format: redis://ip:port, see
      #   https://github.com/ccache/ccache/wiki/Redis-storage. Note that TLS is
      #   not directly supported by ccache, and requires use of a proxy.
      # * For HTTP use the following format: http://ip:port/cache when using
      #   nginx as backend or http://ip:port|layout=bazel when using Bazel
      #   Remote Cache, see https://github.com/ccache/ccache/wiki/HTTP-storage.
      #   Note that HTTPS is not directly supported by ccache.
      CCACHE_REMOTE_ONLY: true
      CCACHE_REMOTE_STORAGE: http://cache.dev.ripplex.io:8080|layout=bazel
      # Ignore the creation and modification timestamps on files, since the
      # header files are copied into separate directories by CMake, which will
      # otherwise result in cache misses.
      CCACHE_SLOPPINESS: include_file_ctime,include_file_mtime
      # Determine if coverage and voidstar should be enabled.
      COVERAGE_ENABLED: ${{ contains(inputs.cmake_args, '-Dcoverage=ON') }}
      VOIDSTAR_ENABLED: ${{ contains(inputs.cmake_args, '-Dvoidstar=ON') }}
      SANITIZERS_ENABLED: ${{ inputs.sanitizers != '' }}
    steps:
      - name: Cleanup workspace (macOS and Windows)
        if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
        uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf

      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

      - name: Prepare runner
        uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
        with:
          enable_ccache: ${{ inputs.ccache_enabled }}

      - name: Set ccache log file
        if: ${{ inputs.ccache_enabled && runner.debug == '1' }}
        run: echo "CCACHE_LOGFILE=${{ runner.temp }}/ccache.log" >> "${GITHUB_ENV}"

      - name: Print build environment
        uses: ./.github/actions/print-env

      - name: Get number of processors
        uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
        id: nproc
        with:
          subtract: ${{ inputs.nproc_subtract }}

      - name: Setup Conan
        env:
          SANITIZERS: ${{ inputs.sanitizers }}
        uses: ./.github/actions/setup-conan

      - name: Build dependencies
        uses: ./.github/actions/build-deps
        with:
          build_nproc: ${{ steps.nproc.outputs.nproc }}
          build_type: ${{ inputs.build_type }}
          # Set the verbosity to "quiet" for Windows to avoid an excessive
          # amount of logs. For other OSes, the "verbose" logs are more useful.
          log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
          sanitizers: ${{ inputs.sanitizers }}

      - name: Configure CMake
        working-directory: ${{ env.BUILD_DIR }}
        env:
          BUILD_TYPE: ${{ inputs.build_type }}
          SANITIZERS: ${{ inputs.sanitizers }}
          CMAKE_ARGS: ${{ inputs.cmake_args }}
        run: |
          cmake \
            -G '${{ runner.os == 'Windows' && 'Visual Studio 17 2022' || 'Ninja' }}' \
            -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
            -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \
            ${CMAKE_ARGS} \
            ..

      - name: Build the binary
        working-directory: ${{ env.BUILD_DIR }}
        env:
          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
          BUILD_TYPE: ${{ inputs.build_type }}
          CMAKE_TARGET: ${{ inputs.cmake_target }}
        run: |
          cmake \
            --build . \
            --config "${BUILD_TYPE}" \
            --parallel "${BUILD_NPROC}" \
            --target "${CMAKE_TARGET}"

      - name: Show ccache statistics
        if: ${{ inputs.ccache_enabled }}
        run: |
          ccache --show-stats -vv
          if [ '${{ runner.debug }}' = '1' ]; then
            cat "${CCACHE_LOGFILE}"
            curl ${CCACHE_REMOTE_STORAGE%|*}/status || true
          fi

      - name: Upload the binary (Linux)
        if: ${{ github.repository_owner == 'XRPLF' && runner.os == 'Linux' }}
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: xrpld-${{ inputs.config_name }}
          path: ${{ env.BUILD_DIR }}/xrpld
          retention-days: 3
          if-no-files-found: error

      - name: Check linking (Linux)
        if: ${{ runner.os == 'Linux' && env.SANITIZERS_ENABLED == 'false' }}
        working-directory: ${{ env.BUILD_DIR }}
        run: |
          ldd ./xrpld
          if [ "$(ldd ./xrpld | grep -E '(libstdc\+\+|libgcc)' | wc -l)" -eq 0 ]; then
            echo 'The binary is statically linked.'
          else
            echo 'The binary is dynamically linked.'
            exit 1
          fi

      - name: Verify presence of instrumentation (Linux)
        if: ${{ runner.os == 'Linux' && env.VOIDSTAR_ENABLED == 'true' }}
        working-directory: ${{ env.BUILD_DIR }}
        run: |
          ./xrpld --version | grep libvoidstar

      - name: Set sanitizer options
        if: ${{ !inputs.build_only && env.SANITIZERS_ENABLED == 'true' }}
        run: |
          echo "ASAN_OPTIONS=print_stacktrace=1:detect_container_overflow=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/asan.supp" >> ${GITHUB_ENV}
          echo "TSAN_OPTIONS=second_deadlock_stack=1:halt_on_error=0:suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/tsan.supp" >> ${GITHUB_ENV}
          echo "UBSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/ubsan.supp" >> ${GITHUB_ENV}
          echo "LSAN_OPTIONS=suppressions=${GITHUB_WORKSPACE}/sanitizers/suppressions/lsan.supp" >> ${GITHUB_ENV}

      - name: Run the separate tests
        if: ${{ !inputs.build_only }}
        working-directory: ${{ env.BUILD_DIR }}
        # Windows locks some of the build files while running tests, and parallel jobs can collide.
        env:
          BUILD_TYPE: ${{ inputs.build_type }}
          PARALLELISM: ${{ runner.os == 'Windows' && '1' || steps.nproc.outputs.nproc }}
        run: |
          ctest \
            --output-on-failure \
            -C "${BUILD_TYPE}" \
            -j "${PARALLELISM}"

      - name: Run the embedded tests
        if: ${{ !inputs.build_only }}
        working-directory: ${{ runner.os == 'Windows' && format('{0}/{1}', env.BUILD_DIR, inputs.build_type) || env.BUILD_DIR }}
        env:
          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
        run: |
          ./xrpld --unittest --unittest-jobs "${BUILD_NPROC}"

      - name: Debug failure (Linux)
        if: ${{ failure() && runner.os == 'Linux' && !inputs.build_only }}
        run: |
          echo "IPv4 local port range:"
          cat /proc/sys/net/ipv4/ip_local_port_range
          echo "Netstat:"
          netstat -an

      - name: Prepare coverage report
        if: ${{ !inputs.build_only && env.COVERAGE_ENABLED == 'true' }}
        working-directory: ${{ env.BUILD_DIR }}
        env:
          BUILD_NPROC: ${{ steps.nproc.outputs.nproc }}
          BUILD_TYPE: ${{ inputs.build_type }}
        run: |
          cmake \
            --build . \
            --config "${BUILD_TYPE}" \
            --parallel "${BUILD_NPROC}" \
            --target coverage

      - name: Upload coverage report
        if: ${{ github.repository_owner == 'XRPLF' && !inputs.build_only && env.COVERAGE_ENABLED == 'true' }}
        uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3
        with:
          disable_search: true
          disable_telem: true
          fail_ci_if_error: true
          files: ${{ env.BUILD_DIR }}/coverage.xml
          plugins: noop
          token: ${{ secrets.CODECOV_TOKEN }}
          verbose: true
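One detail worth calling out: the debug branch of the ccache step strips the `|layout=bazel` attribute from `CCACHE_REMOTE_STORAGE` with the `%|*` parameter expansion before probing the backend. A self-contained sketch of that expansion:

```bash
CCACHE_REMOTE_STORAGE='http://cache.dev.ripplex.io:8080|layout=bazel'

# `%|*` removes the shortest suffix matching `|*`, i.e. the ccache
# attribute list, leaving only the plain HTTP endpoint.
echo "${CCACHE_REMOTE_STORAGE%|*}"  # http://cache.dev.ripplex.io:8080

# The workflow then queries the backend's status page, tolerating failure.
curl "${CCACHE_REMOTE_STORAGE%|*}/status" || true
```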
62 .github/workflows/reusable-build-test.yml vendored Normal file
@@ -0,0 +1,62 @@
# This workflow builds and tests the binary for various configurations.
name: Build and test

# This workflow can only be triggered by other workflows. Note that the
# workflow_call event does not support the 'choice' input type, see
# https://docs.github.com/en/actions/reference/workflows-and-actions/workflow-syntax#onworkflow_callinputsinput_idtype,
# so we use 'string' instead.
on:
  workflow_call:
    inputs:
      ccache_enabled:
        description: "Whether to enable ccache."
        required: false
        type: boolean
        default: false

      os:
        description: 'The operating system to use for the build ("linux", "macos", "windows").'
        required: true
        type: string

      strategy_matrix:
        # TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
        description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
        required: false
        type: string
        default: "minimal"

    secrets:
      CODECOV_TOKEN:
        description: "The Codecov token to use for uploading coverage reports."
        required: true

jobs:
  # Generate the strategy matrix to be used by the following job.
  generate-matrix:
    uses: ./.github/workflows/reusable-strategy-matrix.yml
    with:
      os: ${{ inputs.os }}
      strategy_matrix: ${{ inputs.strategy_matrix }}

  # Build and test the binary for each configuration.
  build-test-config:
    needs:
      - generate-matrix
    uses: ./.github/workflows/reusable-build-test-config.yml
    strategy:
      fail-fast: ${{ github.event_name == 'merge_group' }}
      matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
      max-parallel: 10
    with:
      build_only: ${{ matrix.build_only }}
      build_type: ${{ matrix.build_type }}
      ccache_enabled: ${{ inputs.ccache_enabled }}
      cmake_args: ${{ matrix.cmake_args }}
      cmake_target: ${{ matrix.cmake_target }}
      runs_on: ${{ toJSON(matrix.architecture.runner) }}
      image: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || '' }}
      config_name: ${{ matrix.config_name }}
      sanitizers: ${{ matrix.sanitizers }}
    secrets:
      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
46 .github/workflows/reusable-check-levelization.yml vendored Normal file
@@ -0,0 +1,46 @@
# This workflow checks if the dependencies between the modules are correctly
# indexed.
name: Check levelization

# This workflow can only be triggered by other workflows.
on: workflow_call

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-levelization
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  levelization:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
      - name: Check levelization
        run: .github/scripts/levelization/generate.sh
      - name: Check for differences
        env:
          MESSAGE: |

            The dependency relationships between the modules in xrpld have
            changed, which may be an improvement or a regression.

            A rule of thumb is that if your changes caused something to be
            removed from loops.txt, it's probably an improvement, while if
            something was added, it's probably a regression.

            Run '.github/scripts/levelization/generate.sh' in your repo, commit
            and push the changes. See .github/scripts/levelization/README.md for
            more info.
        run: |
          DIFF=$(git status --porcelain)
          if [ -n "${DIFF}" ]; then
            # Print the differences to give the contributor a hint about what to
            # expect when running levelization on their own machine.
            git diff
            echo "${MESSAGE}"
            exit 1
          fi
54 .github/workflows/reusable-check-rename.yml vendored Normal file
@@ -0,0 +1,54 @@
# This workflow checks if the codebase is properly renamed, see more info in
# .github/scripts/rename/README.md.
name: Check rename

# This workflow can only be triggered by other workflows.
on: workflow_call

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-rename
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  rename:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0
      - name: Check definitions
        run: .github/scripts/rename/definitions.sh .
      - name: Check copyright notices
        run: .github/scripts/rename/copyright.sh .
      - name: Check CMake configs
        run: .github/scripts/rename/cmake.sh .
      - name: Check binary name
        run: .github/scripts/rename/binary.sh .
      - name: Check namespaces
        run: .github/scripts/rename/namespace.sh .
      - name: Check config name
        run: .github/scripts/rename/config.sh .
      - name: Check include guards
        run: .github/scripts/rename/include.sh .
      - name: Check for differences
        env:
          MESSAGE: |

            One or more files contain changes that do not adhere to new naming
            conventions.

            Run the scripts in '.github/scripts/rename/' in your repo, commit
            and push the changes. See .github/scripts/rename/README.md for
            more info.
        run: |
          DIFF=$(git status --porcelain)
          if [ -n "${DIFF}" ]; then
            # Print the differences to give the contributor a hint about what to
            # expect when running the renaming scripts on their own machine.
            git diff
            echo "${MESSAGE}"
            exit 1
          fi
45 .github/workflows/reusable-strategy-matrix.yml vendored Normal file
@@ -0,0 +1,45 @@
name: Generate strategy matrix

on:
  workflow_call:
    inputs:
      os:
        description: 'The operating system to use for the build ("linux", "macos", "windows").'
        required: false
        type: string
      strategy_matrix:
        # TODO: Support additional strategies, e.g. "ubuntu" for generating all Ubuntu configurations.
        description: 'The strategy matrix to use for generating the configurations ("minimal", "all").'
        required: false
        type: string
        default: "minimal"
    outputs:
      matrix:
        description: "The generated strategy matrix."
        value: ${{ jobs.generate-matrix.outputs.matrix }}

defaults:
  run:
    shell: bash

jobs:
  generate-matrix:
    runs-on: ubuntu-latest
    outputs:
      matrix: ${{ steps.generate.outputs.matrix }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

      - name: Set up Python
        uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
        with:
          python-version: 3.13

      - name: Generate strategy matrix
        working-directory: .github/scripts/strategy-matrix
        id: generate
        env:
          GENERATE_CONFIG: ${{ inputs.os != '' && format('--config={0}.json', inputs.os) || '' }}
          GENERATE_OPTION: ${{ inputs.strategy_matrix == 'all' && '--all' || '' }}
        run: ./generate.py ${GENERATE_OPTION} ${GENERATE_CONFIG} >> "${GITHUB_OUTPUT}"
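`generate.py` has to emit a single `matrix=...` line into `${GITHUB_OUTPUT}` that the caller feeds through `fromJson()`, and downstream workflows index fields such as `matrix.architecture.runner` and `matrix.os.distro_name`. A hedged sketch of what one such line could look like; the exact schema lives in the script, and every field value below is hypothetical:

```bash
# One `include` entry per build/test configuration, on a single line.
echo 'matrix={"include":[{"config_name":"linux-gcc-Release","build_only":false,"build_type":"Release","cmake_args":"","cmake_target":"all","sanitizers":"","architecture":{"platform":"linux/amd64","runner":["self-hosted","Linux"]},"os":{"distro_name":"ubuntu","distro_version":"noble","compiler_name":"gcc","compiler_version":"13","image_sha":"5dd7158"}}]}' >> "${GITHUB_OUTPUT}"
```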
97 .github/workflows/reusable-upload-recipe.yml vendored Normal file
@@ -0,0 +1,97 @@
# This workflow exports the built libxrpl package to the Conan remote.
name: Upload Conan recipe

# This workflow can only be triggered by other workflows.
on:
  workflow_call:
    inputs:
      remote_name:
        description: "The name of the Conan remote to use."
        required: false
        type: string
        default: xrplf
      remote_url:
        description: "The URL of the Conan endpoint to use."
        required: false
        type: string
        default: https://conan.ripplex.io

    secrets:
      remote_username:
        description: "The username for logging into the Conan remote."
        required: true
      remote_password:
        description: "The password for logging into the Conan remote."
        required: true

    outputs:
      recipe_ref:
        description: "The Conan recipe reference ('name/version') that was uploaded."
        value: ${{ jobs.upload.outputs.ref }}

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-upload-recipe
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  upload:
    runs-on: ubuntu-latest
    container: ghcr.io/xrplf/ci/ubuntu-noble:gcc-13-sha-5dd7158
    steps:
      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

      - name: Generate build version number
        id: version
        uses: ./.github/actions/generate-version

      - name: Set up Conan
        uses: ./.github/actions/setup-conan
        with:
          remote_name: ${{ inputs.remote_name }}
          remote_url: ${{ inputs.remote_url }}

      - name: Log into Conan remote
        env:
          REMOTE_NAME: ${{ inputs.remote_name }}
          REMOTE_USERNAME: ${{ secrets.remote_username }}
          REMOTE_PASSWORD: ${{ secrets.remote_password }}
        run: conan remote login "${REMOTE_NAME}" "${REMOTE_USERNAME}" --password "${REMOTE_PASSWORD}"

      - name: Upload Conan recipe (version)
        env:
          REMOTE_NAME: ${{ inputs.remote_name }}
        run: |
          conan export . --version=${{ steps.version.outputs.version }}
          conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/${{ steps.version.outputs.version }}

      - name: Upload Conan recipe (develop)
        if: ${{ github.ref == 'refs/heads/develop' }}
        env:
          REMOTE_NAME: ${{ inputs.remote_name }}
        run: |
          conan export . --version=develop
          conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/develop

      - name: Upload Conan recipe (rc)
        if: ${{ startsWith(github.ref, 'refs/heads/release') }}
        env:
          REMOTE_NAME: ${{ inputs.remote_name }}
        run: |
          conan export . --version=rc
          conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/rc

      - name: Upload Conan recipe (release)
        if: ${{ github.event_name == 'tag' }}
        env:
          REMOTE_NAME: ${{ inputs.remote_name }}
        run: |
          conan export . --version=release
          conan upload --confirm --check --remote="${REMOTE_NAME}" xrpl/release

    outputs:
      ref: xrpl/${{ steps.version.outputs.version }}
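Once a channel such as `xrpl/develop` has been exported and uploaded, a consumer can resolve it from the same remote. A minimal sketch, assuming Conan 2 and the default remote name and URL from the inputs above:

```bash
# Register the remote used by this workflow.
conan remote add xrplf https://conan.ripplex.io

# Pull the recipe that the workflow uploaded, building anything missing.
conan install --requires=xrpl/develop --remote=xrplf --build=missing
```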
113 .github/workflows/upload-conan-deps.yml vendored Normal file
@@ -0,0 +1,113 @@
name: Upload Conan Dependencies

on:
  schedule:
    - cron: "0 3 * * 2-6"
  workflow_dispatch:
    inputs:
      force_source_build:
        description: "Force source build of all dependencies"
        required: false
        default: false
        type: boolean
      force_upload:
        description: "Force upload of all dependencies"
        required: false
        default: false
        type: boolean
  pull_request:
    branches: [develop]
    paths:
      # This allows testing changes to the upload workflow in a PR
      - .github/workflows/upload-conan-deps.yml
  push:
    branches: [develop]
    paths:
      - .github/workflows/upload-conan-deps.yml
      - .github/workflows/reusable-strategy-matrix.yml
      - .github/actions/build-deps/action.yml
      - .github/actions/setup-conan/action.yml
      - ".github/scripts/strategy-matrix/**"
      - conanfile.py
      - conan.lock

env:
  CONAN_REMOTE_NAME: xrplf
  CONAN_REMOTE_URL: https://conan.ripplex.io
  NPROC_SUBTRACT: 2

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

defaults:
  run:
    shell: bash

jobs:
  # Generate the strategy matrix to be used by the following job.
  generate-matrix:
    uses: ./.github/workflows/reusable-strategy-matrix.yml
    with:
      strategy_matrix: ${{ github.event_name == 'pull_request' && 'minimal' || 'all' }}

  # Build and upload the dependencies for each configuration.
  run-upload-conan-deps:
    needs:
      - generate-matrix
    strategy:
      fail-fast: false
      matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }}
      max-parallel: 10
    runs-on: ${{ matrix.architecture.runner }}
    container: ${{ contains(matrix.architecture.platform, 'linux') && format('ghcr.io/xrplf/ci/{0}-{1}:{2}-{3}-sha-{4}', matrix.os.distro_name, matrix.os.distro_version, matrix.os.compiler_name, matrix.os.compiler_version, matrix.os.image_sha) || null }}
    steps:
      - name: Cleanup workspace (macOS and Windows)
        if: ${{ runner.os == 'macOS' || runner.os == 'Windows' }}
        uses: XRPLF/actions/cleanup-workspace@cf0433aa74563aead044a1e395610c96d65a37cf

      - name: Checkout repository
        uses: actions/checkout@08eba0b27e820071cde6df949e0beb9ba4906955 # v4.3.0

      - name: Prepare runner
        uses: XRPLF/actions/prepare-runner@2cbf481018d930656e9276fcc20dc0e3a0be5b6d
        with:
          enable_ccache: false

      - name: Print build environment
        uses: ./.github/actions/print-env

      - name: Get number of processors
        uses: XRPLF/actions/get-nproc@cf0433aa74563aead044a1e395610c96d65a37cf
        id: nproc
        with:
          subtract: ${{ env.NPROC_SUBTRACT }}

      - name: Setup Conan
        env:
          SANITIZERS: ${{ matrix.sanitizers }}
        uses: ./.github/actions/setup-conan
        with:
          remote_name: ${{ env.CONAN_REMOTE_NAME }}
          remote_url: ${{ env.CONAN_REMOTE_URL }}

      - name: Build dependencies
        uses: ./.github/actions/build-deps
        with:
          build_nproc: ${{ steps.nproc.outputs.nproc }}
          build_type: ${{ matrix.build_type }}
          force_build: ${{ github.event_name == 'schedule' || github.event.inputs.force_source_build == 'true' }}
          # Set the verbosity to "quiet" for Windows to avoid an excessive
          # amount of logs. For other OSes, the "verbose" logs are more useful.
          log_verbosity: ${{ runner.os == 'Windows' && 'quiet' || 'verbose' }}
          sanitizers: ${{ matrix.sanitizers }}

      - name: Log into Conan remote
        if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
        run: conan remote login "${CONAN_REMOTE_NAME}" "${{ secrets.CONAN_REMOTE_USERNAME }}" --password "${{ secrets.CONAN_REMOTE_PASSWORD }}"

      - name: Upload Conan packages
        if: ${{ github.repository_owner == 'XRPLF' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}
        env:
          FORCE_OPTION: ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }}
        run: conan upload "*" --remote="${CONAN_REMOTE_NAME}" --confirm ${FORCE_OPTION}
91 .github/workflows/windows.yml vendored
@@ -1,91 +0,0 @@
name: windows

on:
  pull_request:
  push:
    # If the branches list is ever changed, be sure to change it on all
    # build/test jobs (nix, macos, windows)
    branches:
      # Always build the package branches
      - develop
      - release
      - master
      # Branches that opt-in to running
      - 'ci/**'

# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:

  test:
    strategy:
      fail-fast: false
      matrix:
        generator:
          - Visual Studio 16 2019
        configuration:
          - Release
          # GitHub hosted runners tend to hang when running Debug unit tests.
          # Instead of trying to work around it, disable the Debug job until
          # something beefier (i.e. a heavy self-hosted runner) becomes
          # available.
          # - Debug
    runs-on: windows-2019
    env:
      build_dir: .build
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: choose Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.9
      - name: learn Python cache directory
        id: pip-cache
        shell: bash
        run: |
          python -m pip install --upgrade pip
          echo "dir=$(pip cache dir)" | tee ${GITHUB_OUTPUT}
      - name: restore Python cache directory
        uses: actions/cache@v4
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }}
      - name: install Conan
        run: pip install wheel 'conan<2'
      - name: check environment
        run: |
          dir env:
          $env:PATH -split ';'
          python --version
          conan --version
          cmake --version
      - name: configure Conan
        shell: bash
        run: |
          conan profile new default --detect
          conan profile update settings.compiler.cppstd=20 default
          conan profile update settings.compiler.runtime=MT${{ matrix.configuration == 'Debug' && 'd' || '' }} default
      - name: build dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
          CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
          CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
        with:
          configuration: ${{ matrix.configuration }}
      - name: build
        uses: ./.github/actions/build
        with:
          generator: '${{ matrix.generator }}'
          configuration: ${{ matrix.configuration }}
          # Hard code for now. Move to the matrix if varied options are needed
          cmake-args: '-Dassert=ON -Dreporting=OFF -Dunity=ON'
          cmake-target: install
      - name: test
        shell: bash
        run: |
          ${build_dir}/${{ matrix.configuration }}/rippled --unittest --unittest-jobs $(nproc)
123 .gitignore vendored
@@ -1,70 +1,48 @@
# .gitignore
# cspell: disable

bin/boostbook_catalog.xml
bin/config.log
bin/project-cache.jam

# Ignore vim swap files.
*.swp

# Ignore SCons support files.
.sconsign.dblite

# Ignore python compiled files.
*.pyc

# Ignore Macintosh Desktop Services Store files.
# Macintosh Desktop Services Store files.
.DS_Store

# Ignore backup/temps
# Build, intermediate, and temporary artifacts.
*~

# Ignore object files.
*.o
.nih_c
tags
TAGS
GTAGS
GRTAGS
GPATH
bin/rippled
Debug/*.*
Release/*.*
*.pdb
*.swp
/.clangd
Debug/
Release/
/.build/
/build/
/db/
/out.txt
/Testing/
/tmp/
CMakeSettings.json
CMakeUserPresets.json

# Ignore coverage files.
# Coverage files.
*.gcno
*.gcda
*.gcov

# Levelization checking
Builds/levelization/results/rawincludes.txt
Builds/levelization/results/paths.txt
Builds/levelization/results/includes/
Builds/levelization/results/includedby/
# Profiling data.
gmon.out

# Ignore tmp directory.
tmp
# Levelization data.
.github/scripts/levelization/results/*
!.github/scripts/levelization/results/loops.txt
!.github/scripts/levelization/results/ordering.txt

# Ignore database directory.
db/
db/*.db
db/*.db-*
# Customized configs.
/rippled.cfg
/xrpld.cfg
/validators.txt

# Ignore debug logs
debug_log.txt
# Locally patched Conan recipes
external/conan-center-index/

# Ignore customized configs
rippled.cfg
validators.txt

# Doxygen generated documentation output
HtmlDocumentation
docs/html_doc

# Xcode user-specific project settings
# Xcode
.DS_Store
/build/
# XCode IDE.
*.pbxuser
!default.pbxuser
*.mode1v3
@@ -77,38 +55,19 @@ xcuserdata
profile
*.moved-aside
DerivedData
.idea/
*.hmap

# Intel Parallel Studio 2013 XE
My Amplifier XE Results - RippleD
# JetBrains IDE.
/.idea/

# Compiler intermediate output
/out.txt
# Microsoft Visual Studio IDE.
/.vs/
/.vscode/

# Build Log
rippled-build.log
# zed IDE.
/.zed/

# Profiling data
gmon.out

Builds/VisualStudio2015/*.db
Builds/VisualStudio2015/*.user
Builds/VisualStudio2015/*.opendb
Builds/VisualStudio2015/*.sdf

# MSVC
*.pdb
.vs/
CMakeSettings.json
compile_commands.json
.clangd
packages
pkg_out
pkg
CMakeUserPresets.json
bld.rippled/
.vscode

# Suggested in-tree build directory
/.build/
# AI tools.
/.augment
/.claude
/CLAUDE.md
@@ -1,6 +1,64 @@
# .pre-commit-config.yaml
# To run pre-commit hooks, first install pre-commit:
# - `pip install pre-commit==${PRE_COMMIT_VERSION}`
#
# Then, run the following command to install the git hook scripts:
# - `pre-commit install`
# You can run all configured hooks against all files with:
# - `pre-commit run --all-files`
# To manually run a specific hook, use:
# - `pre-commit run <hook_id> --all-files`
# To run the hooks against only the staged files, use:
# - `pre-commit run`
repos:
  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: v10.0.1
    hooks:
      - id: clang-format
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: 3e8a8703264a2f4a69428a0aa4dcb512790b2c8c # frozen: v6.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: mixed-line-ending
      - id: check-merge-conflict
        args: [--assume-in-merge]

  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: 7d85583be209cb547946c82fbe51f4bc5dd1d017 # frozen: v18.1.8
    hooks:
      - id: clang-format
        args: [--style=file]
        "types_or": [c++, c, proto]

  - repo: https://github.com/cheshirekow/cmake-format-precommit
    rev: e2c2116d86a80e72e7146a06e68b7c228afc6319 # frozen: v0.6.13
    hooks:
      - id: cmake-format
        additional_dependencies: [PyYAML]

  - repo: https://github.com/rbubley/mirrors-prettier
    rev: 5ba47274f9b181bce26a5150a725577f3c336011 # frozen: v3.6.2
    hooks:
      - id: prettier

  - repo: https://github.com/psf/black-pre-commit-mirror
    rev: 831207fd435b47aeffdf6af853097e64322b4d44 # frozen: v25.12.0
    hooks:
      - id: black

  - repo: https://github.com/streetsidesoftware/cspell-cli
    rev: 1cfa010f078c354f3ffb8413616280cc28f5ba21 # frozen: v9.4.0
    hooks:
      - id: cspell # Spell check changed files
        exclude: .config/cspell.config.yaml
      - id: cspell # Spell check the commit message
        name: check commit message spelling
        args:
          - --no-must-find-files
          - --no-progress
          - --no-summary
          - --files
          - .git/COMMIT_EDITMSG
        stages: [commit-msg]

exclude: |
  (?x)^(
    external/.*|
    .github/scripts/levelization/results/.*\.txt
  )$
1 .prettierignore Normal file
@@ -0,0 +1 @@
external
232 API-CHANGELOG.md
@@ -6,56 +6,155 @@ For info about how [API versioning](https://xrpl.org/request-formatting.html#api

The API version controls the API behavior you see. This includes what properties you see in responses, what parameters you're permitted to send in requests, and so on. You specify the API version in each of your requests. When a breaking change is introduced to the `rippled` API, a new version is released. To avoid breaking your code, you should set (or increase) your version when you're ready to upgrade.

The [commandline](https://xrpl.org/docs/references/http-websocket-apis/api-conventions/request-formatting/#commandline-format) always uses the latest API version. The command line is intended for ad-hoc usage by humans, not programs or automated scripts. The command line is not meant for use in production code.

For a log of breaking changes, see the **API Version [number]** headings. In general, breaking changes are associated with a particular API Version number. For non-breaking changes, scroll to the **XRP Ledger version [x.y.z]** headings. Non-breaking changes are associated with a particular XRP Ledger (`rippled`) release.

## API Version 3 (Beta)

API version 3 is currently a beta API. It requires enabling `[beta_rpc_api]` in the rippled configuration to use. See [API-VERSION-3.md](API-VERSION-3.md) for the full list of changes in API version 3.

## API Version 2

API version 2 is available in `rippled` version 2.0.0 and later. See [API-VERSION-2.md](API-VERSION-2.md) for the full list of changes in API version 2.

## API Version 1

This version is supported by all `rippled` versions. At time of writing, it is the default API version, used when no `api_version` is specified. When a new API version is introduced, the command line interface will default to the latest API version. The command line is intended for ad-hoc usage by humans, not programs or automated scripts. The command line is not meant for use in production code.
This version is supported by all `rippled` versions. For WebSocket and HTTP JSON-RPC requests, it is currently the default API version used when no `api_version` is specified.

### Idiosyncrasies
## XRP Ledger server version 3.1.0

#### V1 account_info response
[Version 3.1.0](https://github.com/XRPLF/rippled/releases/tag/3.1.0) was released on Jan 27, 2026.

In [the response to the `account_info` command](https://xrpl.org/account_info.html#response-format), there is `account_data` - which is supposed to be an `AccountRoot` object - and `signer_lists` is returned in this object. However, the docs say that `signer_lists` should be at the root level of the response.
### Additions in 3.1.0

It makes sense for `signer_lists` to be at the root level because signer lists are not part of the AccountRoot object. (First reported in [xrpl-dev-portal#938](https://github.com/XRPLF/xrpl-dev-portal/issues/938).)
- `vault_info`: New RPC method to retrieve information about a specific vault (part of XLS-66 Lending Protocol). ([#6156](https://github.com/XRPLF/rippled/pull/6156))

In `api_version: 2`, the `signer_lists` field [will be moved](#modifications-to-account_info-response-in-v2) to the root level of the account_info response. (https://github.com/XRPLF/rippled/pull/3770)

## XRP Ledger server version 3.0.0

#### server_info - network_id
[Version 3.0.0](https://github.com/XRPLF/rippled/releases/tag/3.0.0) was released on Dec 9, 2025.

The `network_id` field was added in the `server_info` response in version 1.5.0 (2019), but it is not returned in [reporting mode](https://xrpl.org/rippled-server-modes.html#reporting-mode).
### Additions in 3.0.0

- `ledger_entry`: Supports all ledger entry types with dedicated parsers. ([#5237](https://github.com/XRPLF/rippled/pull/5237))
- `ledger_entry`: New error codes `entryNotFound` and `unexpectedLedgerType` for more specific error handling. ([#5237](https://github.com/XRPLF/rippled/pull/5237))
- `ledger_entry`: Improved error messages with more context (e.g., specifying which field is invalid or missing). ([#5237](https://github.com/XRPLF/rippled/pull/5237))
- `ledger_entry`: Assorted bug fixes in RPC processing. ([#5237](https://github.com/XRPLF/rippled/pull/5237))
- `simulate`: Supports additional metadata in the response. ([#5754](https://github.com/XRPLF/rippled/pull/5754))

## XRP Ledger server version 2.6.2

[Version 2.6.2](https://github.com/XRPLF/rippled/releases/tag/2.6.2) was released on Nov 19, 2025.

This release contains bug fixes only and no API changes.

## XRP Ledger server version 2.6.1

[Version 2.6.1](https://github.com/XRPLF/rippled/releases/tag/2.6.1) was released on Sep 30, 2025.

This release contains bug fixes only and no API changes.

## XRP Ledger server version 2.6.0

[Version 2.6.0](https://github.com/XRPLF/rippled/releases/tag/2.6.0) was released on Aug 27, 2025.

### Additions in 2.6.0

- `account_info`: Added `allowTrustLineLocking` flag in response. ([#5525](https://github.com/XRPLF/rippled/pull/5525))
- `ledger`: Removed the type filter from the RPC command. ([#4934](https://github.com/XRPLF/rippled/pull/4934))
- `subscribe` (`validations` stream): `network_id` is now included. ([#5579](https://github.com/XRPLF/rippled/pull/5579))
- `subscribe` (`transactions` stream): `nftoken_id`, `nftoken_ids`, and `offer_id` are now included in transaction metadata. ([#5230](https://github.com/XRPLF/rippled/pull/5230))

## XRP Ledger server version 2.5.1

[Version 2.5.1](https://github.com/XRPLF/rippled/releases/tag/2.5.1) was released on Sep 17, 2025.

This release contains bug fixes only and no API changes.

## XRP Ledger server version 2.5.0

[Version 2.5.0](https://github.com/XRPLF/rippled/releases/tag/2.5.0) was released on Jun 24, 2025.

### Additions and bugfixes in 2.5.0

- `tx`: Added `ctid` field to the response and improved error handling. ([#4738](https://github.com/XRPLF/rippled/pull/4738))
- `ledger_entry`: Improved error messages in `permissioned_domain`. ([#5344](https://github.com/XRPLF/rippled/pull/5344))
- `simulate`: Improved multi-sign usage. ([#5479](https://github.com/XRPLF/rippled/pull/5479))
- `channel_authorize`: If `signing_support` is not enabled in the config, the RPC is disabled. ([#5385](https://github.com/XRPLF/rippled/pull/5385))
- `subscribe` (admin): Removed webhook queue limit to prevent dropping notifications; reduced HTTP timeout from 10 minutes to 30 seconds. ([#5163](https://github.com/XRPLF/rippled/pull/5163))
- `ledger_data` (gRPC): Fixed crashing issue with some invalid markers. ([#5137](https://github.com/XRPLF/rippled/pull/5137))
- `account_lines`: Fixed error with `no_ripple` and `no_ripple_peer` sometimes showing up incorrectly. ([#5345](https://github.com/XRPLF/rippled/pull/5345))
- `account_tx`: Fixed issue with incorrect CTIDs. ([#5408](https://github.com/XRPLF/rippled/pull/5408))

## XRP Ledger server version 2.4.0

[Version 2.4.0](https://github.com/XRPLF/rippled/releases/tag/2.4.0) was released on March 4, 2025.

### Additions and bugfixes in 2.4.0

- `simulate`: A new RPC that executes a [dry run of a transaction submission](https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0069d-simulate#2-rpc-simulate). ([#5069](https://github.com/XRPLF/rippled/pull/5069))
- Signing methods (`sign`, `sign_for`, `submit`): Autofill fees better, properly handle transactions without a base fee, and autofill the `NetworkID` field. ([#5069](https://github.com/XRPLF/rippled/pull/5069))
- `ledger_entry`: `state` is added as an alias for `ripple_state`. ([#5199](https://github.com/XRPLF/rippled/pull/5199))
- `ledger`, `ledger_data`, `account_objects`: Support filtering ledger entry types by their canonical names (case-insensitive). ([#5271](https://github.com/XRPLF/rippled/pull/5271))
- `validators`: Added new field `validator_list_threshold` in response. ([#5112](https://github.com/XRPLF/rippled/pull/5112))
- `server_info`: Added git commit hash info on admin connection. ([#5225](https://github.com/XRPLF/rippled/pull/5225))
- `server_definitions`: Changed larger `UInt` serialized types to `Hash`. ([#5231](https://github.com/XRPLF/rippled/pull/5231))

## XRP Ledger server version 2.3.1

[Version 2.3.1](https://github.com/XRPLF/rippled/releases/tag/2.3.1) was released on Jan 29, 2025.

This release contains bug fixes only and no API changes.

## XRP Ledger server version 2.3.0

[Version 2.3.0](https://github.com/XRPLF/rippled/releases/tag/2.3.0) was released on Nov 25, 2024.

### Breaking changes in 2.3.0

- `book_changes`: If the requested ledger version is not available on this node, a `ledgerNotFound` error is returned and the node does not attempt to acquire the ledger from the p2p network (as with other non-admin RPCs). Admins can still attempt to retrieve old ledgers with the `ledger_request` RPC.

### Additions and bugfixes in 2.3.0

- `book_changes`: Returns a `validated` field in its response. ([#5096](https://github.com/XRPLF/rippled/pull/5096))
- `book_changes`: Accepts shortcut strings (`current`, `closed`, `validated`) for the `ledger_index` parameter. ([#5096](https://github.com/XRPLF/rippled/pull/5096))
- `server_definitions`: Include `index` in response. ([#5190](https://github.com/XRPLF/rippled/pull/5190))
- `account_nfts`: Fix issue where unassociated marker would return incorrect results. ([#5045](https://github.com/XRPLF/rippled/pull/5045))
- `account_objects`: Fix issue where invalid marker would not return an error. ([#5046](https://github.com/XRPLF/rippled/pull/5046))
- `account_objects`: Disallow filtering by ledger entry types that an account cannot hold. ([#5056](https://github.com/XRPLF/rippled/pull/5056))
- `tx`: Allow lowercase CTID. ([#5049](https://github.com/XRPLF/rippled/pull/5049))
- `feature`: Better error handling for invalid values of `feature`. ([#5063](https://github.com/XRPLF/rippled/pull/5063))

## XRP Ledger server version 2.2.0

[Version 2.2.0](https://github.com/XRPLF/rippled/releases/tag/2.2.0) was released on Jun 5, 2024. The following additions are non-breaking (because they are purely additive):

- `feature`: Add a non-admin mode for users. (It was previously only available to admin connections.) The method returns an updated list of amendments, including their names and other information. ([#4781](https://github.com/XRPLF/rippled/pull/4781))

## XRP Ledger server version 2.0.1

[Version 2.0.1](https://github.com/XRPLF/rippled/releases/tag/2.0.1) was released on Jan 29, 2024. The following additions are non-breaking:

- `path_find`: Fixes unbounded memory growth. ([#4822](https://github.com/XRPLF/rippled/pull/4822))

## XRP Ledger server version 2.0.0

### Additions in 2.2

Additions are intended to be non-breaking (because they are purely additive).

- `feature`: A non-admin mode that uses the same formatting as admin RPC, but hides potentially-sensitive data.

### Additions in 2.0

Additions are intended to be non-breaking (because they are purely additive).
[Version 2.0.0](https://github.com/XRPLF/rippled/releases/tag/2.0.0) was released on Jan 9, 2024. The following additions are non-breaking (because they are purely additive):

- `server_definitions`: A new RPC that generates a `definitions.json`-like output that can be used in XRPL libraries.
- In `Payment` transactions, `DeliverMax` has been added. This is a replacement for the `Amount` field, which should not be used. Typically, the `delivered_amount` (in transaction metadata) should be used. To ease the transition, `DeliverMax` is present regardless of API version, since adding a field is non-breaking.
- API version 2 has been moved from beta to supported, meaning that it is generally available (regardless of the `beta_rpc_api` setting).
- API version 2 has been moved from beta to supported, meaning that it is generally available (regardless of the `beta_rpc_api` setting). The full list of changes is in [API-VERSION-2.md](API-VERSION-2.md).

## XRP Ledger server version 1.12.0

[Version 1.12.0](https://github.com/XRPLF/rippled/releases/tag/1.12.0) was released on Sep 6, 2023.
[Version 1.12.0](https://github.com/XRPLF/rippled/releases/tag/1.12.0) was released on Sep 6, 2023. The following additions are non-breaking (because they are purely additive):

### Additions in 1.12

Additions are intended to be non-breaking (because they are purely additive).

- `server_info`: Added `ports`, an array which advertises the RPC and WebSocket ports. This information is also included in the `/crawl` endpoint (which calls `server_info` internally). `grpc` and `peer` ports are also included. (https://github.com/XRPLF/rippled/pull/4427)
- `server_info`: Added `ports`, an array which advertises the RPC and WebSocket ports. This information is also included in the `/crawl` endpoint (which calls `server_info` internally). `grpc` and `peer` ports are also included. ([#4427](https://github.com/XRPLF/rippled/pull/4427))
  - `ports` contains objects, each containing a `port` for the listening port (a number string), and a `protocol` array listing the supported protocols on that port.
  - This allows crawlers to build a more detailed topology without needing to port-scan nodes.
  - (For peers and other non-admin clients, the info about admin ports is excluded.)
- Clawback: The following additions are gated by the Clawback amendment (`featureClawback`). (https://github.com/XRPLF/rippled/pull/4553)
  - Adds an [AccountRoot flag](https://xrpl.org/accountroot.html#accountroot-flags) called `lsfAllowTrustLineClawback` (https://github.com/XRPLF/rippled/pull/4617)
- Clawback: The following additions are gated by the Clawback amendment (`featureClawback`). ([#4553](https://github.com/XRPLF/rippled/pull/4553))
  - Adds an [AccountRoot flag](https://xrpl.org/accountroot.html#accountroot-flags) called `lsfAllowTrustLineClawback`. ([#4617](https://github.com/XRPLF/rippled/pull/4617))
  - Adds the corresponding `asfAllowTrustLineClawback` [AccountSet Flag](https://xrpl.org/accountset.html#accountset-flags) as well.
  - Clawback is disabled by default, so if an issuer desires the ability to claw back funds, they must use an `AccountSet` transaction to set the AllowTrustLineClawback flag. They must do this before creating any trust lines, offers, escrows, payment channels, or checks.
  - Adds the [Clawback transaction type](https://github.com/XRPLF/XRPL-Standards/blob/master/XLS-39d-clawback/README.md#331-clawback-transaction), containing these fields:
@@ -90,16 +189,16 @@ Additions are intended to be non-breaking (because they are purely additive).

### Breaking changes in 1.11

- Added the ability to mark amendments as obsolete. For the `feature` admin API, there is a new possible value for the `vetoed` field. (https://github.com/XRPLF/rippled/pull/4291)
- Added the ability to mark amendments as obsolete. For the `feature` admin API, there is a new possible value for the `vetoed` field. ([#4291](https://github.com/XRPLF/rippled/pull/4291))
  - The value of `vetoed` can now be `true`, `false`, or `"Obsolete"`.
- Removed the acceptance of seeds or public keys in place of account addresses. (https://github.com/XRPLF/rippled/pull/4404)
- Removed the acceptance of seeds or public keys in place of account addresses. ([#4404](https://github.com/XRPLF/rippled/pull/4404))
  - This simplifies the API and encourages better security practices (i.e. seeds should never be sent over the network).
- For the `ledger_data` method, when all entries are filtered out, the `state` field of the response is now an empty list (in other words, an empty array, `[]`). (Previously, it would return `null`.) While this is technically a breaking change, the new behavior is consistent with the documentation, so this is considered only a bug fix. (https://github.com/XRPLF/rippled/pull/4398)
- For the `ledger_data` method, when all entries are filtered out, the `state` field of the response is now an empty list (in other words, an empty array, `[]`). (Previously, it would return `null`.) While this is technically a breaking change, the new behavior is consistent with the documentation, so this is considered only a bug fix. ([#4398](https://github.com/XRPLF/rippled/pull/4398))
- If and when the `fixNFTokenRemint` amendment activates, there will be a new AccountRoot field, `FirstNFTSequence`. This field is set to the current account sequence when the account issues their first NFT. If an account has not issued any NFTs, then the field is not set. ([#4406](https://github.com/XRPLF/rippled/pull/4406))
  - There is a new account deletion restriction: an account can only be deleted if `FirstNFTSequence` + `MintedNFTokens` + `256` is less than the current ledger sequence.
  - This is potentially a breaking change if clients have logic for determining whether an account can be deleted.
- NetworkID
  - For sidechains and networks with a network ID greater than 1024, there is a new [transaction common field](https://xrpl.org/transaction-common-fields.html), `NetworkID`. (https://github.com/XRPLF/rippled/pull/4370)
  - For sidechains and networks with a network ID greater than 1024, there is a new [transaction common field](https://xrpl.org/transaction-common-fields.html), `NetworkID`. ([#4370](https://github.com/XRPLF/rippled/pull/4370))
  - This field helps to prevent replay attacks and is now required for chains whose network ID is 1025 or higher.
  - The field must be omitted for Mainnet, so there is no change for Mainnet users.
  - There are three new local error codes:
@@ -109,10 +208,10 @@ Additions are intended to be non-breaking (because they are purely additive).

### Additions and bug fixes in 1.11

- Added `nftoken_id`, `nftoken_ids` and `offer_id` meta fields into NFT `tx` and `account_tx` responses. (https://github.com/XRPLF/rippled/pull/4447)
- Added an `account_flags` object to the `account_info` method response. (https://github.com/XRPLF/rippled/pull/4459)
- Added `NFTokenPages` to the `account_objects` RPC. (https://github.com/XRPLF/rippled/pull/4352)
- Fixed: `marker` returned from the `account_lines` command would not work on subsequent commands. (https://github.com/XRPLF/rippled/pull/4361)
- Added `nftoken_id`, `nftoken_ids` and `offer_id` meta fields into NFT `tx` and `account_tx` responses. ([#4447](https://github.com/XRPLF/rippled/pull/4447))
- Added an `account_flags` object to the `account_info` method response. ([#4459](https://github.com/XRPLF/rippled/pull/4459))
- Added `NFTokenPages` to the `account_objects` RPC. ([#4352](https://github.com/XRPLF/rippled/pull/4352))
- Fixed: `marker` returned from the `account_lines` command would not work on subsequent commands. ([#4361](https://github.com/XRPLF/rippled/pull/4361))

## XRP Ledger server version 1.10.0

@@ -125,71 +224,6 @@ was released on Mar 14, 2023.
removed from the [ledger subscription stream](https://xrpl.org/subscribe.html#ledger-stream), because it will no longer
have any meaning.

## API Version 2

API version 2 is introduced in `rippled` version 2.0. Users can request it explicitly by specifying `"api_version" : 2`.

#### Removed methods

In API version 2, the following deprecated methods are no longer available: (https://github.com/XRPLF/rippled/pull/4759)

- `tx_history` - Instead, use other methods such as `account_tx` or `ledger` with the `transactions` field set to `true`.
- `ledger_header` - Instead, use the `ledger` method.

#### Modifications to JSON transaction element in V2

In API version 2, JSON elements for transaction output have been changed and made consistent for all methods which output transactions. (https://github.com/XRPLF/rippled/pull/4775)
This helps to unify the JSON serialization format of transactions. (https://github.com/XRPLF/clio/issues/722, https://github.com/XRPLF/rippled/issues/4727)

- JSON transaction element is named `tx_json`
- Binary transaction element is named `tx_blob`
- JSON transaction metadata element is named `meta`
- Binary transaction metadata element is named `meta_blob`

Additionally, these elements are now consistently available next to `tx_json` (i.e. sibling elements), where possible:

- `hash` - Transaction ID. This data was stored inside transaction output in API version 1, but in API version 2 is a sibling element.
- `ledger_index` - Ledger index (only set on validated ledgers)
- `ledger_hash` - Ledger hash (only set on closed or validated ledgers)
- `close_time_iso` - Ledger close time expressed in ISO 8601 time format (only set on validated ledgers)
- `validated` - Bool element set to `true` if the transaction is in a validated ledger, otherwise `false`

This change affects the following methods:

- `tx` - Transaction data moved into element `tx_json` (was inline inside `result`) or, if binary output was requested, moved from `tx` to `tx_blob`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
- `account_tx` - Renamed transaction element from `tx` to `tx_json`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
- `transaction_entry` - Renamed transaction metadata element from `metadata` to `meta`. Changed location of `hash` and added new elements
- `subscribe` - Renamed transaction element from `transaction` to `tx_json`. Changed location of `hash` and added new elements
- `sign`, `sign_for`, `submit` and `submit_multisigned` - Changed location of `hash` element.

#### Modification to `Payment` transaction JSON schema

When reading Payments, the `Amount` field should generally **not** be used. Instead, use [delivered_amount](https://xrpl.org/partial-payments.html#the-delivered_amount-field) to see the amount that the Payment delivered. To clarify its meaning, the `Amount` field is being renamed to `DeliverMax`. (https://github.com/XRPLF/rippled/pull/4733)

- In `Payment` transaction type, JSON RPC field `Amount` is renamed to `DeliverMax`. To enable smooth client transition, `Amount` is still handled, as described below: (https://github.com/XRPLF/rippled/pull/4733)
  - On JSON RPC input (e.g. `submit_multisigned` etc. methods), `Amount` is recognized as an alias to `DeliverMax` for both API version 1 and version 2 clients.
  - On JSON RPC input, submitting both `Amount` and `DeliverMax` fields is allowed _only_ if they are identical; otherwise such input is rejected with `rpcINVALID_PARAMS` error.
  - On JSON RPC output (e.g. `subscribe`, `account_tx` etc. methods), `DeliverMax` is present in both API version 1 and version 2.
  - On JSON RPC output, `Amount` is only present in API version 1 and _not_ in version 2.

#### Modifications to account_info response

- `signer_lists` is returned in the root of the response. In API version 1, it was nested under `account_data`. (https://github.com/XRPLF/rippled/pull/3770)
- When using an invalid `signer_lists` value, the API now returns an "invalidParams" error. (https://github.com/XRPLF/rippled/pull/4585)
  - (`signer_lists` must be a boolean. In API version 1, strings were accepted and may return a normal response - i.e. as if `signer_lists` were `true`.)

#### Modifications to [account_tx](https://xrpl.org/account_tx.html#account_tx) response

- Using `ledger_index_min`, `ledger_index_max`, and `ledger_index` returns `invalidParams` because if you use `ledger_index_min` or `ledger_index_max`, then it does not make sense to also specify `ledger_index`. In API version 1, no error was returned. (https://github.com/XRPLF/rippled/pull/4571)
  - The same applies for `ledger_index_min`, `ledger_index_max`, and `ledger_hash`. (https://github.com/XRPLF/rippled/issues/4545#issuecomment-1565065579)
- Using a `ledger_index_min` or `ledger_index_max` beyond the range of ledgers that the server has:
  - returns `lgrIdxMalformed` in API version 2. Previously, in API version 1, no error was returned. (https://github.com/XRPLF/rippled/issues/4288)
- Attempting to use a non-boolean value (such as a string) for the `binary` or `forward` parameters returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. (https://github.com/XRPLF/rippled/pull/4620)

#### Modifications to [noripple_check](https://xrpl.org/noripple_check.html#noripple_check) response

- Attempting to use a non-boolean value (such as a string) for the `transactions` parameter returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. (https://github.com/XRPLF/rippled/pull/4620)

# Unit tests for API changes

The following information is useful to developers contributing to this project:

66 API-VERSION-2.md Normal file
@@ -0,0 +1,66 @@
# API Version 2

API version 2 is available in `rippled` version 2.0.0 and later. To use this API, clients specify `"api_version" : 2` in each request.

For info about how [API versioning](https://xrpl.org/request-formatting.html#api-versioning) works, including examples, please view the [XLS-22d spec](https://github.com/XRPLF/XRPL-Standards/discussions/54). For details about the implementation of API versioning, view the [implementation PR](https://github.com/XRPLF/rippled/pull/3155). API versioning ensures existing integrations and users continue to receive existing behavior, while those that request a higher API version will experience new behavior.

## Removed methods

In API version 2, the following deprecated methods are no longer available: ([#4759](https://github.com/XRPLF/rippled/pull/4759))

- `tx_history` - Instead, use other methods such as `account_tx` or `ledger` with the `transactions` field set to `true`.
- `ledger_header` - Instead, use the `ledger` method.

## Modifications to JSON transaction element in API version 2

In API version 2, JSON elements for transaction output have been changed and made consistent for all methods which output transactions. ([#4775](https://github.com/XRPLF/rippled/pull/4775))
This helps to unify the JSON serialization format of transactions. ([clio#722](https://github.com/XRPLF/clio/issues/722), [#4727](https://github.com/XRPLF/rippled/issues/4727))

- JSON transaction element is named `tx_json`
- Binary transaction element is named `tx_blob`
- JSON transaction metadata element is named `meta`
- Binary transaction metadata element is named `meta_blob`

Additionally, these elements are now consistently available next to `tx_json` (i.e. sibling elements), where possible:

- `hash` - Transaction ID. This data was stored inside transaction output in API version 1, but in API version 2 is a sibling element.
- `ledger_index` - Ledger index (only set on validated ledgers)
- `ledger_hash` - Ledger hash (only set on closed or validated ledgers)
- `close_time_iso` - Ledger close time expressed in ISO 8601 time format (only set on validated ledgers)
- `validated` - Bool element set to `true` if the transaction is in a validated ledger, otherwise `false`

This change affects the following methods (a request sketch follows the list):

- `tx` - Transaction data moved into element `tx_json` (was inline inside `result`) or, if binary output was requested, moved from `tx` to `tx_blob`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
- `account_tx` - Renamed transaction element from `tx` to `tx_json`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
- `transaction_entry` - Renamed transaction metadata element from `metadata` to `meta`. Changed location of `hash` and added new elements
- `subscribe` - Renamed transaction element from `transaction` to `tx_json`. Changed location of `hash` and added new elements
- `sign`, `sign_for`, `submit` and `submit_multisigned` - Changed location of `hash` element.
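
To make these renames concrete, here is a minimal sketch of a `tx` call at API version 2 over JSON-RPC. It is not part of the upstream documentation: the transaction hash is a placeholder, and the URL assumes a local server with JSON-RPC on port 5005.

```bash
# Sketch only: request a transaction with api_version 2.
# The hash is a placeholder; replace it with a real transaction ID.
curl -s -X POST http://localhost:5005/ \
  -H 'Content-Type: application/json' \
  -d '{
    "method": "tx",
    "params": [{
      "api_version": 2,
      "transaction": "0000000000000000000000000000000000000000000000000000000000000000"
    }]
  }'
# In the v2 response, "result" holds "tx_json" (the decoded transaction),
# with "hash", "meta", "ledger_index", "ledger_hash", "close_time_iso", and
# "validated" as sibling elements, per the list above.
```
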
## Modifications to `Payment` transaction JSON schema

When reading Payments, the `Amount` field should generally **not** be used. Instead, use [delivered_amount](https://xrpl.org/partial-payments.html#the-delivered_amount-field) to see the amount that the Payment delivered. To clarify its meaning, the `Amount` field is being renamed to `DeliverMax`. ([#4733](https://github.com/XRPLF/rippled/pull/4733))

- In `Payment` transaction type, JSON RPC field `Amount` is renamed to `DeliverMax`. To enable smooth client transition, `Amount` is still handled, as described below (a sketch follows the list): ([#4733](https://github.com/XRPLF/rippled/pull/4733))
  - On JSON RPC input (e.g. `submit_multisigned` etc. methods), `Amount` is recognized as an alias to `DeliverMax` for both API version 1 and version 2 clients.
  - On JSON RPC input, submitting both `Amount` and `DeliverMax` fields is allowed _only_ if they are identical; otherwise such input is rejected with `rpcINVALID_PARAMS` error.
  - On JSON RPC output (e.g. `subscribe`, `account_tx` etc. methods), `DeliverMax` is present in both API version 1 and version 2.
  - On JSON RPC output, `Amount` is only present in API version 1 and _not_ in version 2.
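
A minimal sketch of the input-side rules, not from the upstream documentation; the account, destination, and seed are placeholders, and the URL assumes a local server with JSON-RPC on port 5005.

```bash
# Sketch only: sign a Payment using DeliverMax. Sending "Amount" alone, or
# both fields with identical values, is also accepted; both fields with
# different values is rejected with rpcINVALID_PARAMS.
curl -s -X POST http://localhost:5005/ \
  -H 'Content-Type: application/json' \
  -d '{
    "method": "sign",
    "params": [{
      "api_version": 2,
      "secret": "<placeholder-seed>",
      "tx_json": {
        "TransactionType": "Payment",
        "Account": "<placeholder-address>",
        "Destination": "<placeholder-address>",
        "DeliverMax": "1000000"
      }
    }]
  }'
```
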
## Modifications to account_info response

- `signer_lists` is returned in the root of the response. (In API version 1, it was nested under `account_data`.) See the sketch below. ([#3770](https://github.com/XRPLF/rippled/pull/3770))
- When using an invalid `signer_lists` value, the API now returns an "invalidParams" error. ([#4585](https://github.com/XRPLF/rippled/pull/4585))
  - (`signer_lists` must be a boolean. In API version 1, strings were accepted and may return a normal response - i.e. as if `signer_lists` were `true`.)
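
For illustration (not from the upstream documentation; the address is a placeholder and the URL assumes a local server with JSON-RPC on port 5005):

```bash
# Sketch only: with api_version 2, "signer_lists" appears at the root of
# "result" rather than inside "result.account_data".
curl -s -X POST http://localhost:5005/ \
  -H 'Content-Type: application/json' \
  -d '{
    "method": "account_info",
    "params": [{
      "api_version": 2,
      "account": "<placeholder-address>",
      "signer_lists": true
    }]
  }'
```
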
## Modifications to [account_tx](https://xrpl.org/account_tx.html#account_tx) response

- Using `ledger_index_min`, `ledger_index_max`, and `ledger_index` returns `invalidParams` because if you use `ledger_index_min` or `ledger_index_max`, then it does not make sense to also specify `ledger_index`. In API version 1, no error was returned. ([#4571](https://github.com/XRPLF/rippled/pull/4571))
  - The same applies for `ledger_index_min`, `ledger_index_max`, and `ledger_hash`. ([#4545](https://github.com/XRPLF/rippled/issues/4545#issuecomment-1565065579))
- Using a `ledger_index_min` or `ledger_index_max` beyond the range of ledgers that the server has:
  - returns `lgrIdxMalformed` in API version 2. Previously, in API version 1, no error was returned. ([#4288](https://github.com/XRPLF/rippled/issues/4288))
- Attempting to use a non-boolean value (such as a string) for the `binary` or `forward` parameters returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. ([#4620](https://github.com/XRPLF/rippled/pull/4620))

## Modifications to [noripple_check](https://xrpl.org/noripple_check.html#noripple_check) response

- Attempting to use a non-boolean value (such as a string) for the `transactions` parameter returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. ([#4620](https://github.com/XRPLF/rippled/pull/4620))

27 API-VERSION-3.md Normal file
@@ -0,0 +1,27 @@
# API Version 3

API version 3 is currently a **beta API**. It requires enabling `[beta_rpc_api]` in the rippled configuration to use. To use this API, clients specify `"api_version" : 3` in each request.

For info about how [API versioning](https://xrpl.org/request-formatting.html#api-versioning) works, including examples, please view the [XLS-22d spec](https://github.com/XRPLF/XRPL-Standards/discussions/54). For details about the implementation of API versioning, view the [implementation PR](https://github.com/XRPLF/rippled/pull/3155). API versioning ensures existing integrations and users continue to receive existing behavior, while those that request a higher API version will experience new behavior.

## Breaking Changes

### Modifications to `amm_info`

The order of error checks has been changed to provide more specific error messages. ([#4924](https://github.com/XRPLF/rippled/pull/4924))

- **Before (API v2)**: When sending an invalid account or asset to `amm_info` while other parameters are not set as expected, the method returns a generic `rpcINVALID_PARAMS` error.
- **After (API v3)**: The same scenario returns a more specific error: `rpcISSUE_MALFORMED` for malformed assets or `rpcACT_MALFORMED` for malformed accounts.

### Modifications to `ledger_entry`

Added support for string shortcuts to look up fixed-location ledger entries using the `"index"` parameter. ([#5644](https://github.com/XRPLF/rippled/pull/5644))

In API version 3, the following string values can be used with the `"index"` parameter:

- `"index": "amendments"` - Returns the `Amendments` ledger entry
- `"index": "fee"` - Returns the `FeeSettings` ledger entry
- `"index": "nunl"` - Returns the `NegativeUNL` ledger entry
- `"index": "hashes"` - Returns the "short" `LedgerHashes` ledger entry (recent ledger hashes)

These shortcuts are only available in API version 3 and later. In API versions 1 and 2, these string values would result in an error.
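
For example, a minimal sketch (not from the upstream documentation; the URL assumes a local server with JSON-RPC on port 5005) of looking up the `Amendments` entry by its shortcut:

```bash
# Sketch only: ledger_entry with a string shortcut, available from api_version 3.
curl -s -X POST http://localhost:5005/ \
  -H 'Content-Type: application/json' \
  -d '{
    "method": "ledger_entry",
    "params": [{
      "api_version": 3,
      "index": "amendments",
      "ledger_index": "validated"
    }]
  }'
```
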
675 BUILD.md
@@ -1,31 +1,31 @@
| :warning: **WARNING** :warning:
|---|
| :warning: **WARNING** :warning: |
| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| These instructions assume you have a C++ development environment ready with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up on Linux, macOS, or Windows, [see this guide](./docs/build/environment.md). |

> These instructions also assume a basic familiarity with Conan and CMake.
> If you are unfamiliar with Conan,
> you can read our [crash course](./docs/build/conan.md)
> or the official [Getting Started][3] walkthrough.
> If you are unfamiliar with Conan, you can read our
> [crash course](./docs/build/conan.md) or the official [Getting Started][3]
> walkthrough.

## Branches

For a stable release, choose the `master` branch or one of the [tagged
releases](https://github.com/ripple/rippled/releases).
releases](https://github.com/XRPLF/rippled/releases).

```bash
git checkout master
```

For the latest release candidate, choose the `release` branch.

```bash
git checkout release
```

For the latest set of untested features, or to contribute, choose the `develop`
branch.

```bash
git checkout develop
```

@@ -33,133 +33,370 @@ git checkout develop

See [System Requirements](https://xrpl.org/system-requirements.html).

Building rippled generally requires git, Python, Conan, CMake, and a C++ compiler. Some guidance on setting up such a [C++ development environment can be found here](./docs/build/environment.md).
Building xrpld generally requires git, Python, Conan, CMake, and a C++
compiler. Some guidance on setting up such a [C++ development environment can be
found here](./docs/build/environment.md).

- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.55](https://conan.io/downloads.html)
- [CMake 3.16](https://cmake.org/download/)
- [Python 3.11](https://www.python.org/downloads/), or higher
- [Conan 2.17](https://conan.io/downloads.html)[^1], or higher
- [CMake 3.22](https://cmake.org/download/), or higher

`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
[^1]:
    It is possible to build with Conan 1.60+, but the instructions are
    significantly different, which is why we are not recommending it.

`xrpld` is written in the C++20 dialect and includes the `<concepts>` header.
The [minimum compiler versions][2] required are:

| Compiler    | Version |
| ----------- | ------- |
| GCC         | 11      |
| Clang       | 13      |
| Apple Clang | 13.1.6  |
| MSVC        | 19.23   |

| Compiler    | Version   |
| ----------- | --------- |
| GCC         | 12        |
| Clang       | 16        |
| Apple Clang | 16        |
| MSVC        | 19.44[^3] |
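
One way to check your local toolchain against this table (a convenience sketch, assuming the tools are already on your `PATH`):

```bash
g++ --version      # or: clang++ --version
cmake --version
conan --version
python3 --version
```
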
### Linux

The Ubuntu operating system has received the highest level of
quality assurance, testing, and support.
The Ubuntu Linux distribution has received the highest level of quality
assurance, testing, and support. We also support Red Hat and use Debian
internally.

Here are [sample instructions for setting up a C++ development environment on Linux](./docs/build/environment.md#linux).
Here are [sample instructions for setting up a C++ development environment on
Linux](./docs/build/environment.md#linux).

### Mac

Many rippled engineers use macOS for development.
Many xrpld engineers use macOS for development.

Here are [sample instructions for setting up a C++ development environment on macOS](./docs/build/environment.md#macos).
Here are [sample instructions for setting up a C++ development environment on
macOS](./docs/build/environment.md#macos).

### Windows

Windows is not recommended for production use at this time.
Windows is used by some engineers for development only.

- Additionally, 32-bit Windows development is not supported.
- Visual Studio 2022 is not yet supported.
  - rippled generally requires [Boost][] 1.77, which Conan cannot build with VS 2022.
  - Until rippled is updated for compatibility with later versions of Boost, Windows developers may need to use Visual Studio 2019.

[Boost]: https://www.boost.org/
[^3]: Windows is not recommended for production use.

## Steps

### Set Up Conan

After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python, Conan, CMake, and a C++ compiler, you may need to set up your Conan profile.
After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python,
Conan, CMake, and a C++ compiler, you may need to set up your Conan profile.

These instructions assume a basic familiarity with Conan and CMake.
These instructions assume a basic familiarity with Conan and CMake. If you are
unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official
[Getting Started][3] walkthrough.

If you are unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official [Getting Started][3] walkthrough.

#### Conan lockfile

You'll need at least one Conan profile:
To achieve reproducible dependencies, we use a [Conan lockfile](https://docs.conan.io/2/tutorial/versioning/lockfiles.html),
which has to be updated every time dependencies change.

```
conan profile new default --detect
```

Please see the [instructions on how to regenerate the lockfile](conan/lockfile/README.md).

Update the compiler settings:

#### Default profile

```
conan profile update settings.compiler.cppstd=20 default
```

We recommend that you import the provided `conan/profiles/default` profile:

**Linux** developers will commonly have a default Conan [profile][] that compiles
with GCC and links with libstdc++.
If you are linking with libstdc++ (see profile setting `compiler.libcxx`),
then you will need to choose the `libstdc++11` ABI:

```
conan profile update settings.compiler.libcxx=libstdc++11 default
```

**Windows** developers may need to use the x64 native build tools.
An easy way to do that is to run the shortcut "x64 Native Tools Command
Prompt" for the version of Visual Studio that you have installed.

Windows developers must also build `rippled` and its dependencies for the x64
architecture:

```
conan profile update settings.arch=x86_64 default
```

### Multiple compilers

When `/usr/bin/g++` exists on a platform, it is the default cpp compiler. This
default works for some users.

However, if this compiler cannot build rippled or its dependencies, then you can
install another compiler and set Conan and CMake to use it.
Update the `conf.tools.build:compiler_executables` setting in order to set the correct variables (`CMAKE_<LANG>_COMPILER`) in the
generated CMake toolchain file.
For example, on Ubuntu 20, you may have gcc at `/usr/bin/gcc` and g++ at `/usr/bin/g++`; if that is the case, you can select those compilers with:

```
conan profile update 'conf.tools.build:compiler_executables={"c": "/usr/bin/gcc", "cpp": "/usr/bin/g++"}' default
```

```bash
conan config install conan/profiles/ -tf $(conan config home)/profiles/
```

Replace `/usr/bin/gcc` and `/usr/bin/g++` with paths to the desired compilers.
You can check your Conan profile by running:

It should choose the compiler for dependencies as well,
but not all of them have a Conan recipe that respects this setting (yet).
For the rest, you can set these environment variables.
Replace `<path>` with paths to the desired compilers:

```bash
conan profile show
```

- `conan profile update env.CC=<path> default`
- `conan profile update env.CXX=<path> default`

#### Custom profile

Export our [Conan recipe for Snappy](./external/snappy).
It does not explicitly link the C++ standard library,
which allows you to statically link it with GCC, if you want.
If the default profile does not work for you and you do not yet have a Conan
profile, you can create one by running:

```
conan export external/snappy snappy/1.1.10@
```

```bash
conan profile detect
```

Export our [Conan recipe for RocksDB](./external/rocksdb).
It does not override paths to dependencies when building with Visual Studio.
You may need to make changes to the profile to suit your environment. You can
refer to the provided `conan/profiles/default` profile for inspiration, and you
may also need to apply the required [tweaks](#conan-profile-tweaks) to this
default profile.

```
conan export external/rocksdb rocksdb/6.29.5@
```

### Patched recipes

Export our [Conan recipe for SOCI](./external/soci).
It patches their CMake to correctly import its dependencies.
The recipes in Conan Center occasionally need to be patched for compatibility
with the latest version of `xrpld`. We maintain a fork of the Conan Center
[here](https://github.com/XRPLF/conan-center-index/) containing the patches.

```
conan export external/soci soci/4.0.3@
```

To ensure our patched recipes are used, you must add our Conan remote at a
higher index than the default Conan Center remote, so it is consulted first. You
can do this by running:

```bash
conan remote add --index 0 xrplf https://conan.ripplex.io
```

Alternatively, you can pull the patched recipes into the repository and use them
locally:

```bash
# Extract the version number from the lockfile.
function extract_version {
  version=$(cat conan.lock | sed -nE "s@.+${1}/(.+)#.+@\1@p" | head -n1)
  echo ${version}
}

# Define which recipes to export.
recipes=('ed25519' 'grpc' 'nudb' 'openssl' 'secp256k1' 'snappy' 'soci')
folders=('all' 'all' 'all' '3.x.x' 'all' 'all' 'all')

# Selectively check out the recipes from our CCI fork.
cd external
mkdir -p conan-center-index
cd conan-center-index
git init
git remote add origin git@github.com:XRPLF/conan-center-index.git
git sparse-checkout init
for ((index = 0; index < ${#recipes[@]}; index++)); do
  recipe=${recipes[index]}
  folder=${folders[index]}
  echo "Checking out recipe '${recipe}' from folder '${folder}'..."
  git sparse-checkout add recipes/${recipe}/${folder}
done
git fetch origin master
git checkout master
cd ../..

# Export the recipes into the local cache.
for ((index = 0; index < ${#recipes[@]}; index++)); do
  recipe=${recipes[index]}
  folder=${folders[index]}
  version=$(extract_version ${recipe})
  echo "Exporting '${recipe}/${version}' from '${recipe}/${folder}'..."
  conan export --version ${version} \
    external/conan-center-index/recipes/${recipe}/${folder}
done
```

If we switch to a newer version of a dependency that still requires a patch,
you will need to pull in the changes and re-export the updated recipe at the
newer version. However, if we switch to a newer version that no longer requires
a patch, no action is required on your part, as the new recipe will be
automatically pulled from the official Conan Center.

> [!NOTE]
> You might need to add `--lockfile=""` to your `conan install` command
> to avoid automatic use of the existing `conan.lock` file when you run
> `conan export` manually on your machine.
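
For example, a hypothetical invocation that skips the committed lockfile after a manual export (a sketch only, combining flags already shown in these instructions):

```bash
conan install . --lockfile="" --output-folder . --build missing
```
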
### Conan profile tweaks

#### Missing compiler version

If you see an error similar to the following after running `conan profile show`:

```text
ERROR: Invalid setting '17' is not a valid 'settings.compiler.version' value.
Possible values are ['5.0', '5.1', '6.0', '6.1', '7.0', '7.3', '8.0', '8.1',
'9.0', '9.1', '10.0', '11.0', '12.0', '13', '13.0', '13.1', '14', '14.0', '15',
'15.0', '16', '16.0']
Read "http://docs.conan.io/2/knowledge/faq.html#error-invalid-setting"
```

you need to amend the list of compiler versions in
`$(conan config home)/settings.yml`, by appending the required version number(s)
to the `version` array specific for your compiler. For example:

```yaml
apple-clang:
  version:
    [
      "5.0",
      "5.1",
      "6.0",
      "6.1",
      "7.0",
      "7.3",
      "8.0",
      "8.1",
      "9.0",
      "9.1",
      "10.0",
      "11.0",
      "12.0",
      "13",
      "13.0",
      "13.1",
      "14",
      "14.0",
      "15",
      "15.0",
      "16",
      "16.0",
      "17",
      "17.0",
    ]
```

#### Multiple compilers

If you have multiple compilers installed, make sure to select the one to use in
your default Conan configuration **before** running `conan profile detect`, by
setting the `CC` and `CXX` environment variables.

For example, if you are running macOS and have [homebrew
LLVM@18](https://formulae.brew.sh/formula/llvm@18), and want to use it as a
compiler in the new Conan profile:

```bash
export CC=$(brew --prefix llvm@18)/bin/clang
export CXX=$(brew --prefix llvm@18)/bin/clang++
conan profile detect
```

You should also explicitly set the path to the compiler in the profile file,
which helps to avoid errors when `CC` and/or `CXX` are set and disagree with the
selected Conan profile. For example:

```text
[conf]
tools.build:compiler_executables={'c':'/usr/bin/gcc','cpp':'/usr/bin/g++'}
```

#### Multiple profiles

You can manage multiple Conan profiles in the directory
`$(conan config home)/profiles`, for example renaming `default` to a different
name and then creating a new `default` profile for a different compiler.
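
A hypothetical sketch of that workflow (the profile names are arbitrary):

```bash
# Keep the current default profile under a descriptive name...
mv "$(conan config home)/profiles/default" "$(conan config home)/profiles/gcc"
# ...create a fresh default for another compiler...
CC=/usr/bin/clang CXX=/usr/bin/clang++ conan profile detect
# ...and select a profile explicitly when installing dependencies.
conan install . --profile gcc --output-folder . --build missing
```
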
#### Select language

The default profile created by Conan will typically select a different C++
dialect than C++20, which this project uses. You should set `20` in the profile
line starting with `compiler.cppstd=`. For example:

```bash
sed -i.bak -e 's|^compiler\.cppstd=.*$|compiler.cppstd=20|' $(conan config home)/profiles/default
```

#### Select standard library in Linux

**Linux** developers will commonly have a default Conan [profile][] that
compiles with GCC and links with libstdc++. If you are linking with libstdc++
(see profile setting `compiler.libcxx`), then you will need to choose the
`libstdc++11` ABI:

```bash
sed -i.bak -e 's|^compiler\.libcxx=.*$|compiler.libcxx=libstdc++11|' $(conan config home)/profiles/default
```

#### Select architecture and runtime in Windows

**Windows** developers may need to use the x64 native build tools. An easy way
to do that is to run the shortcut "x64 Native Tools Command Prompt" for the
version of Visual Studio that you have installed.

Windows developers must also build `xrpld` and its dependencies for the x64
architecture:

```bash
sed -i.bak -e 's|^arch=.*$|arch=x86_64|' $(conan config home)/profiles/default
```

**Windows** developers must also select the static runtime:

```bash
sed -i.bak -e 's|^compiler\.runtime=.*$|compiler.runtime=static|' $(conan config home)/profiles/default
```

#### Clang workaround for grpc

If your compiler is clang, version 19 or later, or apple-clang, version 17 or
later, you may encounter a compilation error while building the `grpc`
dependency:

```text
In file included from .../lib/promise/try_seq.h:26:
.../lib/promise/detail/basic_seq.h:499:38: error: a template argument list is expected after a name prefixed by the template keyword [-Wmissing-template-arg-list-after-template-kw]
  499 |         Traits::template CallSeqFactory(f_, *cur_, std::move(arg)));
      |                                  ^
```

The workaround for this error is to add two lines to your profile:

```text
[conf]
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
```

#### Workaround for gcc 12

If your compiler is gcc, version 12, and you have enabled the `werr` option,
you may encounter a compilation error such as:

```text
/usr/include/c++/12/bits/char_traits.h:435:56: error: 'void* __builtin_memcpy(void*, const void*, long unsigned int)' accessing 9223372036854775810 or more bytes at offsets [2, 9223372036854775807] and 1 may overlap up to 9223372036854775813 bytes at offset -3 [-Werror=restrict]
  435 |         return static_cast<char_type*>(__builtin_memcpy(__s1, __s2, __n));
      |         ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~
cc1plus: all warnings being treated as errors
```

The workaround for this error is to add two lines to your profile:

```text
[conf]
tools.build:cxxflags=['-Wno-restrict']
```

#### Workaround for clang 16

If your compiler is clang, version 16, you may encounter a compilation error
such as:

```text
In file included from .../boost/beast/websocket/stream.hpp:2857:
.../boost/beast/websocket/impl/read.hpp:695:17: error: call to 'async_teardown' is ambiguous
    async_teardown(impl.role, impl.stream(),
    ^~~~~~~~~~~~~~
```

The workaround for this error is to add two lines to your profile:

```text
[conf]
tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS']
```

### Set Up Ccache

To speed up repeated compilations, we recommend that you install
[ccache](https://ccache.dev), a tool that wraps your compiler so that it can
cache build objects locally.

#### Linux

You can install it using the package manager, e.g. `sudo apt install ccache`
(Ubuntu) or `sudo dnf install ccache` (RHEL).

#### macOS

You can install it using Homebrew, i.e. `brew install ccache`.

#### Windows

You can install it using Chocolatey, i.e. `choco install ccache`. If you already
have Ccache installed, then `choco upgrade ccache` will update it to the latest
version. However, if you see an error such as:

```text
terminate called after throwing an instance of 'std::bad_alloc'
  what():  std::bad_alloc
C:\Program Files\Microsoft Visual Studio\2022\Community\MSBuild\Microsoft\VC\v170\Microsoft.CppCommon.targets(617,5): error MSB6006: "cl.exe" exited with code 3.
```

then please install a specific version of Ccache that we know works, via:
`choco install ccache --version 4.11.3 --allow-downgrade`.
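
On any platform, you can confirm that Ccache is installed and watch it populate its cache (a convenience sketch, assuming `ccache` is on your `PATH`):

```bash
ccache --version
ccache --show-stats   # cache hits/misses; reset counters with --zero-stats
```
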
### Build and Test

@@ -179,63 +416,54 @@ It patches their CMake to correctly import its dependencies.
   the `install-folder` or `-if` option to every `conan install` command
   in the next step.

2. Generate CMake files for every configuration you want to build.
2. Use conan to generate CMake files for every configuration you want to build:

   ```
   conan install .. --output-folder . --build missing --settings build_type=Release
   conan install .. --output-folder . --build missing --settings build_type=Debug
   ```

   For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`,
   you only need to run this command once.
   For a multi-configuration generator, e.g. `Visual Studio`, you may want to
   run it more than once.
   To build Debug, in the next step, be sure to set `-DCMAKE_BUILD_TYPE=Debug`

   Each of these commands should also have a different `build_type` setting.
   A second command with the same `build_type` setting will overwrite the files
   generated by the first. You can pass the build type on the command line with
   `--settings build_type=$BUILD_TYPE` or in the profile itself,
   under the section `[settings]` with the key `build_type`.

   If you are using a Microsoft Visual C++ compiler,
   then you will need to ensure consistency between the `build_type` setting
   and the `compiler.runtime` setting.

   When `build_type` is `Release`, `compiler.runtime` should be `MT`.

   When `build_type` is `Debug`, `compiler.runtime` should be `MTd`.
```
|
||||
conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT
|
||||
conan install .. --output-folder . --build missing --settings build_type=Debug --settings compiler.runtime=MTd
|
||||
```
|
||||
Each of these commands should also have a different `build_type` setting.
|
||||
A second command with the same `build_type` setting will overwrite the files
|
||||
generated by the first. You can pass the build type on the command line with
|
||||
`--settings build_type=$BUILD_TYPE` or in the profile itself,
|
||||
under the section `[settings]` with the key `build_type`.
|
||||
|
||||
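   If you set the build type in the profile instead, the `[settings]` section
   uses plain Conan profile syntax; a minimal sketch:

   ```text
   [settings]
   build_type=Debug
   ```
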
3. Configure CMake and pass the toolchain file generated by Conan, located at
   `$OUTPUT_FOLDER/build/generators/conan_toolchain.cmake`.

   Single-config generators:

   Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type]
   and make sure it matches the `build_type` setting you chose in the previous
   step.

   For example, to build Debug, in the next command, replace "Release" with
   "Debug".

   ```
   cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON ..
   ```

   Multi-config generators:

   ```
   cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -Dxrpld=ON -Dtests=ON ..
   ```

   **Note:** You can pass build options for `xrpld` in this step.

4. Build `xrpld`.

   For a single-configuration generator, it will build whatever configuration
   you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator, you
   must pass the option `--config` to select the build configuration.

   Single-config generators:

@@ -250,24 +478,27 @@ It patches their CMake to correctly import its dependencies.
   cmake --build . --config Debug
   ```

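   For reference, a plain `cmake --build .` is the usual single-config
   invocation, and `--parallel` is a stock CMake flag for concurrent builds (a
   sketch, not a project requirement):

   ```
   cmake --build . --parallel
   ```
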
5. Test xrpld.

   Single-config generators:

   ```
   ./xrpld --unittest --unittest-jobs N
   ```

   Multi-config generators:

   ```
   ./Release/xrpld --unittest --unittest-jobs N
   ./Debug/xrpld --unittest --unittest-jobs N
   ```

   Replace the `--unittest-jobs` parameter N with the desired unit test
   concurrency. The recommended setting is half the number of available CPU
   cores (see the sketch below).

   The location of the `xrpld` binary in your build directory depends on your
   CMake generator. Pass `--help` to see the rest of the command line options.

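   On Linux, that rule of thumb can be computed inline (`nproc` is a coreutils
   tool, not part of xrpld; on macOS, `sysctl -n hw.ncpu` can stand in for it):

   ```
   ./xrpld --unittest --unittest-jobs $(( $(nproc) / 2 ))
   ```
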
## Coverage report

@@ -285,20 +516,20 @@ Prerequisites for the coverage report:

A coverage report is created when the following steps are completed, in order:

1. `xrpld` binary built with instrumentation data, enabled by the `coverage`
   option mentioned above
2. one or more completed runs of the unit tests, which populate the coverage capture data
3. a completed run of the `gcovr` tool (which internally invokes either `gcov` or `llvm-cov`)
   to assemble both the instrumentation data and the coverage capture data into a coverage report

The last step of the above is automated into a single target `coverage`. The instrumented
`xrpld` binary can also be used for regular development or testing work, at
the cost of extra disk space utilization and a small performance hit
(to store coverage capture data). Since the `xrpld` binary is simply a dependency of the
coverage report target, it is possible to re-run the `coverage` target without
rebuilding the `xrpld` binary. Note that running the unit tests before the `coverage`
target is left to the developer; each such run will append to the coverage data
collected in the build directory.

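For example, to append fresh capture data and then regenerate just the report
(a sketch, assuming your current directory is the build directory):

```
./xrpld --unittest --unittest-jobs 4
cmake --build . --target coverage
```
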
The default coverage report format is `html-details`, but the user
can override it to any of the formats listed in `Builds/CMake/CodeCoverage.cmake`
@@ -307,105 +538,100 @@ to generate more than one format at a time by setting the `coverage_extra_args`
variable in `cmake`. The specific command line used to run the `gcovr` tool will be
displayed if the `CODE_COVERAGE_VERBOSE` variable is set.

By default, the code coverage tool runs parallel unit tests with `--unittest-jobs`
set to the number of available CPU cores. This may cause spurious test
errors on Apple. Developers can override the number of unit test jobs with
the `coverage_test_parallelism` variable in `cmake`.

Example use with some cmake variables set:

```
cd .build
conan install .. --output-folder . --build missing --settings build_type=Debug
cmake -DCMAKE_BUILD_TYPE=Debug -Dcoverage=ON -Dxrpld=ON -Dtests=ON -Dcoverage_test_parallelism=2 -Dcoverage_format=html-details -Dcoverage_extra_args="--json coverage.json" -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ..
cmake --build . --target coverage
```

After the `coverage` target is completed, the generated coverage report will be
stored inside the build directory, as either of:

- a file named `coverage.`_extension_, with a suitable extension for the report format, or
- a directory named `coverage`, with `index.html` and other files inside, for the `html-details` or `html-nested` report formats.

## Sanitizers

To build dependencies and xrpld with sanitizer instrumentation, set the
`SANITIZERS` environment variable (only once, before running conan and cmake) and use the `sanitizers` profile in conan:

```bash
export SANITIZERS=address,undefinedbehavior

conan install .. --output-folder . --profile:all sanitizers --build missing --settings build_type=Debug

cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Debug -Dxrpld=ON -Dtests=ON ..
```

See [Sanitizers docs](./docs/build/sanitizers.md) for more details.

## Options

| Option     | Default Value | Description                                                    |
| ---------- | ------------- | -------------------------------------------------------------- |
| `assert`   | OFF           | Enable assertions.                                             |
| `coverage` | OFF           | Prepare the coverage report.                                   |
| `tests`    | OFF           | Build tests.                                                   |
| `unity`    | OFF           | Configure a unity build.                                       |
| `xrpld`    | OFF           | Build the xrpld application, and not just the libxrpl library. |
| `werr`     | OFF           | Treat compilation warnings as errors.                          |
| `wextra`   | OFF           | Enable additional compilation warnings.                        |

[Unity builds][5] may be faster for the first build (at the cost of much more
memory) since they concatenate sources into fewer translation units. Non-unity
builds may be faster for incremental builds, and can be helpful for detecting
`#include` omissions.

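As an illustration, a Debug configuration that flips several of these options
at once might look like the following (combine it with the Conan toolchain file
exactly as in the earlier steps):

```
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
  -DCMAKE_BUILD_TYPE=Debug -Dxrpld=ON -Dtests=ON -Dassert=ON -Dwerr=ON ..
```
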
## Troubleshooting

### Conan

After any updates or changes to dependencies, you may need to do the following:

1. Remove your build directory.
2. Remove individual libraries from the Conan cache, e.g.

   ```bash
   conan remove 'grpc/*'
   ```

   **or**

   remove all libraries from the Conan cache:

   ```bash
   conan remove '*'
   ```

3. Re-run [conan export](#patched-recipes) if needed.
4. [Regenerate lockfile](#conan-lockfile).
5. Re-run [conan install](#build-and-test).

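Put together, a full reset might look like this sketch (`.build` is the build
directory name used elsewhere in this guide; the export and lockfile steps are
described in their own sections):

```bash
rm -rf .build && mkdir .build && cd .build   # 1. remove (and recreate) the build directory
conan remove '*'                             # 2. clear the Conan cache
# 3-4. re-run conan export and regenerate the lockfile, if needed
conan install .. --output-folder . --build missing   # 5. reinstall dependencies
```
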
#### ERROR: Package not resolved

If you're seeing an error like `ERROR: Package 'snappy/1.1.10' not resolved: Unable to find 'snappy/1.1.10#968fef506ff261592ec30c574d4a7809%1756234314.246' in remotes.`,
please add the `xrplf` remote or re-run `conan export` for [patched recipes](#patched-recipes).

### `protobuf/port_def.inc` file not found

If `cmake --build .` results in an error due to a missing protobuf file, then
you might have generated CMake files for a different `build_type` than the
`CMAKE_BUILD_TYPE` you passed to Conan.

```
/xrpld/.build/pb-xrpl.libpb/xrpl/proto/xrpl.pb.h:10:10: fatal error: 'google/protobuf/port_def.inc' file not found
   10 | #include <google/protobuf/port_def.inc>
      |          ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1 error generated.
```

For example, if you want to build Debug:

1. For conan install, pass `--settings build_type=Debug`
2. For cmake, pass `-DCMAKE_BUILD_TYPE=Debug`

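Concretely, the matching pair of commands mirrors the ones shown earlier in
this guide:

```bash
conan install .. --output-folder . --build missing --settings build_type=Debug
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Debug -Dxrpld=ON -Dtests=ON ..
```
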
### recompile with -fPIC

If you get a linker error suggesting that you recompile Boost with
position-independent code, such as:

```
/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/.../lib/libboost_container.a(alloc_lib.o):
requires unsupported dynamic reloc 11; recompile with -fPIC
```

Conan most likely downloaded a bad binary distribution of the dependency.
This seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled with GCC
for Linux. The solution is to build the dependency locally by passing
`--build boost` when calling `conan install`.

```
conan install --build boost ...
```

## Add a Dependency

@@ -413,16 +639,15 @@ If you want to experiment with a new package, follow these steps:

1. Search for the package on [Conan Center](https://conan.io/center/).
2. Modify [`conanfile.py`](./conanfile.py):
   - Add a version of the package to the `requires` property.
   - Change any default options for the package by adding them to the
     `default_options` property (with syntax `'$package:$option': $value`).
3. Modify [`CMakeLists.txt`](./CMakeLists.txt):
   - Add a call to `find_package($package REQUIRED)`.
   - Link a library from the package to the target `ripple_libs`
     (search for the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`).
4. Start coding! Don't forget to include whatever headers you need from the package.

[1]: https://github.com/conan-io/conan-center-index/issues/13168
[2]: https://en.cppreference.com/w/cpp/compiler_support/20
[3]: https://docs.conan.io/en/latest/getting_started.html

@@ -1,48 +0,0 @@
macro(group_sources_in source_dir curdir)
  file(GLOB children RELATIVE ${source_dir}/${curdir}
    ${source_dir}/${curdir}/*)
  foreach (child ${children})
    if (IS_DIRECTORY ${source_dir}/${curdir}/${child})
      group_sources_in(${source_dir} ${curdir}/${child})
    else()
      string(REPLACE "/" "\\" groupname ${curdir})
      source_group(${groupname} FILES
        ${source_dir}/${curdir}/${child})
    endif()
  endforeach()
endmacro()

macro(group_sources curdir)
  group_sources_in(${PROJECT_SOURCE_DIR} ${curdir})
endmacro()

macro (exclude_from_default target_)
  set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_ALL ON)
  set_target_properties (${target_} PROPERTIES EXCLUDE_FROM_DEFAULT_BUILD ON)
endmacro ()

macro (exclude_if_included target_)
  get_directory_property(has_parent PARENT_DIRECTORY)
  if (has_parent)
    exclude_from_default (${target_})
  endif ()
endmacro ()

find_package(Git)

function (git_branch branch_val)
  if (NOT GIT_FOUND)
    return ()
  endif ()
  set (_branch "")
  execute_process (COMMAND ${GIT_EXECUTABLE} "rev-parse" "--abbrev-ref" "HEAD"
                   WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
                   RESULT_VARIABLE _git_exit_code
                   OUTPUT_VARIABLE _temp_branch
                   OUTPUT_STRIP_TRAILING_WHITESPACE
                   ERROR_QUIET)
  if (_git_exit_code EQUAL 0)
    set (_branch ${_temp_branch})
  endif ()
  set (${branch_val} "${_branch}" PARENT_SCOPE)
endfunction ()

@@ -1,453 +0,0 @@
# Copyright (c) 2012 - 2017, Lars Bilke
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
#    may be used to endorse or promote products derived from this software without
#    specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# CHANGES:
#
# 2012-01-31, Lars Bilke
# - Enable Code Coverage
#
# 2013-09-17, Joakim Söderberg
# - Added support for Clang.
# - Some additional usage instructions.
#
# 2016-02-03, Lars Bilke
# - Refactored functions to use named parameters
#
# 2017-06-02, Lars Bilke
# - Merged with modified version from github.com/ufz/ogs
#
# 2019-05-06, Anatolii Kurotych
# - Remove unnecessary --coverage flag
#
# 2019-12-13, FeRD (Frank Dana)
# - Deprecate COVERAGE_LCOVR_EXCLUDES and COVERAGE_GCOVR_EXCLUDES lists in favor
#   of tool-agnostic COVERAGE_EXCLUDES variable, or EXCLUDE setup arguments.
# - CMake 3.4+: All excludes can be specified relative to BASE_DIRECTORY
# - All setup functions: accept BASE_DIRECTORY, EXCLUDE list
# - Set lcov basedir with -b argument
# - Add automatic --demangle-cpp in lcovr, if 'c++filt' is available (can be
#   overridden with NO_DEMANGLE option in setup_target_for_coverage_lcovr().)
# - Delete output dir, .info file on 'make clean'
# - Remove Python detection, since version mismatches will break gcovr
# - Minor cleanup (lowercase function names, update examples...)
#
# 2019-12-19, FeRD (Frank Dana)
# - Rename Lcov outputs, make filtered file canonical, fix cleanup for targets
#
# 2020-01-19, Bob Apthorpe
# - Added gfortran support
#
# 2020-02-17, FeRD (Frank Dana)
# - Make all add_custom_target()s VERBATIM to auto-escape wildcard characters
#   in EXCLUDEs, and remove manual escaping from gcovr targets
#
# 2021-01-19, Robin Mueller
# - Add CODE_COVERAGE_VERBOSE option which will allow to print out commands which are run
# - Added the option for users to set the GCOVR_ADDITIONAL_ARGS variable to supply additional
#   flags to the gcovr command
#
# 2020-05-04, Mihchael Davis
# - Add -fprofile-abs-path to make gcno files contain absolute paths
# - Fix BASE_DIRECTORY not working when defined
# - Change BYPRODUCT from folder to index.html to stop ninja from complaining about double defines
#
# 2021-05-10, Martin Stump
# - Check if the generator is multi-config before warning about non-Debug builds
#
# 2022-02-22, Marko Wehle
# - Change gcovr output from -o <filename> for --xml <filename> and --html <filename> output respectively.
#   This will allow for Multiple Output Formats at the same time by making use of GCOVR_ADDITIONAL_ARGS, e.g. GCOVR_ADDITIONAL_ARGS "--txt".
#
# 2022-09-28, Sebastian Mueller
# - fix append_coverage_compiler_flags_to_target to correctly add flags
# - replace "-fprofile-arcs -ftest-coverage" with "--coverage" (equivalent)
#
# 2024-01-04, Bronek Kozicki
# - remove setup_target_for_coverage_lcov (slow) and setup_target_for_coverage_fastcov (no support for Clang)
# - fix Clang support by adding find_program( ... llvm-cov )
# - add Apple Clang support by adding execute_process( COMMAND xcrun -f llvm-cov ... )
# - add CODE_COVERAGE_GCOV_TOOL to explicitly select gcov tool and disable find_program
# - replace both functions setup_target_for_coverage_gcovr_* with a single setup_target_for_coverage_gcovr
# - add support for all gcovr output formats
#
# 2024-04-03, Bronek Kozicki
# - add support for output formats: jacoco, clover, lcov
#
# USAGE:
#
# 1. Copy this file into your cmake modules path.
#
# 2. Add the following line to your CMakeLists.txt (best inside an if-condition
#    using a CMake option() to enable it just optionally):
#      include(CodeCoverage)
#
# 3. Append necessary compiler flags for all supported source files:
#      append_coverage_compiler_flags()
#    Or for specific target:
#      append_coverage_compiler_flags_to_target(YOUR_TARGET_NAME)
#
# 3.a (OPTIONAL) Set appropriate optimization flags, e.g. -O0, -O1 or -Og
#
# 4. If you need to exclude additional directories from the report, specify them
#    using full paths in the COVERAGE_EXCLUDES variable before calling
#    setup_target_for_coverage_*().
#    Example:
#      set(COVERAGE_EXCLUDES
#          '${PROJECT_SOURCE_DIR}/src/dir1/*'
#          '/path/to/my/src/dir2/*')
#    Or, use the EXCLUDE argument to setup_target_for_coverage_*().
#    Example:
#      setup_target_for_coverage_gcovr(
#          NAME coverage
#          EXECUTABLE testrunner
#          EXCLUDE "${PROJECT_SOURCE_DIR}/src/dir1/*" "/path/to/my/src/dir2/*")
#
# 4.a NOTE: With CMake 3.4+, COVERAGE_EXCLUDES or EXCLUDE can also be set
#     relative to the BASE_DIRECTORY (default: PROJECT_SOURCE_DIR)
#     Example:
#       set(COVERAGE_EXCLUDES "dir1/*")
#       setup_target_for_coverage_gcovr(
#           NAME coverage
#           EXECUTABLE testrunner
#           FORMAT html-details
#           BASE_DIRECTORY "${PROJECT_SOURCE_DIR}/src"
#           EXCLUDE "dir2/*")
#
# 4.b If you need to pass specific options to gcovr, specify them in
#     GCOVR_ADDITIONAL_ARGS variable.
#     Example:
#       set (GCOVR_ADDITIONAL_ARGS --exclude-throw-branches --exclude-noncode-lines -s)
#       setup_target_for_coverage_gcovr(
#           NAME coverage
#           EXECUTABLE testrunner
#           EXCLUDE "src/dir1" "src/dir2")
#
# 5. Use the functions described below to create a custom make target which
#    runs your test executable and produces a code coverage report.
#
# 6. Build a Debug build:
#      cmake -DCMAKE_BUILD_TYPE=Debug ..
#      make
#      make my_coverage_target

include(CMakeParseArguments)

option(CODE_COVERAGE_VERBOSE "Verbose information" FALSE)

# Check prereqs
find_program( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/scripts/test)

if(DEFINED CODE_COVERAGE_GCOV_TOOL)
  set(GCOV_TOOL "${CODE_COVERAGE_GCOV_TOOL}")
elseif(DEFINED ENV{CODE_COVERAGE_GCOV_TOOL})
  set(GCOV_TOOL "$ENV{CODE_COVERAGE_GCOV_TOOL}")
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
  if(APPLE)
    execute_process( COMMAND xcrun -f llvm-cov
      OUTPUT_VARIABLE LLVMCOV_PATH
      OUTPUT_STRIP_TRAILING_WHITESPACE
    )
  else()
    find_program( LLVMCOV_PATH llvm-cov )
  endif()
  if(LLVMCOV_PATH)
    set(GCOV_TOOL "${LLVMCOV_PATH} gcov")
  endif()
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
  find_program( GCOV_PATH gcov )
  set(GCOV_TOOL "${GCOV_PATH}")
endif()

# Check supported compiler (Clang, GNU and Flang)
get_property(LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
foreach(LANG ${LANGUAGES})
  if("${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(Apple)?[Cc]lang")
    if("${CMAKE_${LANG}_COMPILER_VERSION}" VERSION_LESS 3)
      message(FATAL_ERROR "Clang version must be 3.0.0 or greater! Aborting...")
    endif()
  elseif(NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "GNU"
         AND NOT "${CMAKE_${LANG}_COMPILER_ID}" MATCHES "(LLVM)?[Ff]lang")
    message(FATAL_ERROR "Compiler is not GNU or Flang! Aborting...")
  endif()
endforeach()

set(COVERAGE_COMPILER_FLAGS "-g --coverage"
    CACHE INTERNAL "")
if(CMAKE_CXX_COMPILER_ID MATCHES "(GNU|Clang)")
  include(CheckCXXCompilerFlag)
  check_cxx_compiler_flag(-fprofile-abs-path HAVE_cxx_fprofile_abs_path)
  if(HAVE_cxx_fprofile_abs_path)
    set(COVERAGE_CXX_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
  endif()
  include(CheckCCompilerFlag)
  check_c_compiler_flag(-fprofile-abs-path HAVE_c_fprofile_abs_path)
  if(HAVE_c_fprofile_abs_path)
    set(COVERAGE_C_COMPILER_FLAGS "${COVERAGE_COMPILER_FLAGS} -fprofile-abs-path")
  endif()
endif()

set(CMAKE_Fortran_FLAGS_COVERAGE
    ${COVERAGE_COMPILER_FLAGS}
    CACHE STRING "Flags used by the Fortran compiler during coverage builds."
    FORCE )
set(CMAKE_CXX_FLAGS_COVERAGE
    ${COVERAGE_COMPILER_FLAGS}
    CACHE STRING "Flags used by the C++ compiler during coverage builds."
    FORCE )
set(CMAKE_C_FLAGS_COVERAGE
    ${COVERAGE_COMPILER_FLAGS}
    CACHE STRING "Flags used by the C compiler during coverage builds."
    FORCE )
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE
    ""
    CACHE STRING "Flags used for linking binaries during coverage builds."
    FORCE )
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE
    ""
    CACHE STRING "Flags used by the shared libraries linker during coverage builds."
    FORCE )
mark_as_advanced(
  CMAKE_Fortran_FLAGS_COVERAGE
  CMAKE_CXX_FLAGS_COVERAGE
  CMAKE_C_FLAGS_COVERAGE
  CMAKE_EXE_LINKER_FLAGS_COVERAGE
  CMAKE_SHARED_LINKER_FLAGS_COVERAGE )

get_property(GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG))
  message(WARNING "Code coverage results with an optimised (non-Debug) build may be misleading")
endif() # NOT (CMAKE_BUILD_TYPE STREQUAL "Debug" OR GENERATOR_IS_MULTI_CONFIG)

if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
  link_libraries(gcov)
endif()

# Defines a target for running and collecting code coverage information
# Builds dependencies, runs the given executable and outputs reports.
# NOTE! The executable should always have a ZERO as exit code otherwise
# the coverage generation will not complete.
#
# setup_target_for_coverage_gcovr(
#     NAME ctest_coverage                    # New target name
#     EXECUTABLE ctest -j ${PROCESSOR_COUNT} # Executable in PROJECT_BINARY_DIR
#     DEPENDENCIES executable_target         # Dependencies to build first
#     BASE_DIRECTORY "../"                   # Base directory for report
#                                            #  (defaults to PROJECT_SOURCE_DIR)
#     FORMAT "cobertura"                     # Output format, one of:
#                                            #  xml cobertura sonarqube jacoco clover
#                                            #  json-summary json-details coveralls csv
#                                            #  txt html-single html-nested html-details
#                                            #  lcov (xml is an alias to cobertura;
#                                            #  if no format is set, defaults to xml)
#     EXCLUDE "src/dir1/*" "src/dir2/*"      # Patterns to exclude (can be relative
#                                            #  to BASE_DIRECTORY, with CMake 3.4+)
# )
# The user can set the variable GCOVR_ADDITIONAL_ARGS to supply additional flags to the
# gcovr command.
function(setup_target_for_coverage_gcovr)
  set(options NONE)
  set(oneValueArgs BASE_DIRECTORY NAME FORMAT)
  set(multiValueArgs EXCLUDE EXECUTABLE EXECUTABLE_ARGS DEPENDENCIES)
  cmake_parse_arguments(Coverage "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

  if(NOT GCOV_TOOL)
    message(FATAL_ERROR "Could not find gcov or llvm-cov tool! Aborting...")
  endif()

  if(NOT GCOVR_PATH)
    message(FATAL_ERROR "Could not find gcovr tool! Aborting...")
  endif()

  # Set base directory (as absolute path), or default to PROJECT_SOURCE_DIR
  if(DEFINED Coverage_BASE_DIRECTORY)
    get_filename_component(BASEDIR ${Coverage_BASE_DIRECTORY} ABSOLUTE)
  else()
    set(BASEDIR ${PROJECT_SOURCE_DIR})
  endif()

  if(NOT DEFINED Coverage_FORMAT)
    set(Coverage_FORMAT xml)
  endif()

  if("--output" IN_LIST GCOVR_ADDITIONAL_ARGS)
    message(FATAL_ERROR "Unsupported --output option detected in GCOVR_ADDITIONAL_ARGS! Aborting...")
  else()
    if((Coverage_FORMAT STREQUAL "html-details")
        OR (Coverage_FORMAT STREQUAL "html-nested"))
      set(GCOVR_OUTPUT_FILE ${PROJECT_BINARY_DIR}/${Coverage_NAME}/index.html)
      set(GCOVR_CREATE_FOLDER ${PROJECT_BINARY_DIR}/${Coverage_NAME})
    elseif(Coverage_FORMAT STREQUAL "html-single")
      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.html)
    elseif((Coverage_FORMAT STREQUAL "json-summary")
        OR (Coverage_FORMAT STREQUAL "json-details")
        OR (Coverage_FORMAT STREQUAL "coveralls"))
      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.json)
    elseif(Coverage_FORMAT STREQUAL "txt")
      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.txt)
    elseif(Coverage_FORMAT STREQUAL "csv")
      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.csv)
    elseif(Coverage_FORMAT STREQUAL "lcov")
      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.lcov)
    else()
      set(GCOVR_OUTPUT_FILE ${Coverage_NAME}.xml)
    endif()
  endif()

  if((Coverage_FORMAT STREQUAL "cobertura")
      OR (Coverage_FORMAT STREQUAL "xml"))
    list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura "${GCOVR_OUTPUT_FILE}" )
    list(APPEND GCOVR_ADDITIONAL_ARGS --cobertura-pretty )
    set(Coverage_FORMAT cobertura) # overwrite xml
  elseif(Coverage_FORMAT STREQUAL "sonarqube")
    list(APPEND GCOVR_ADDITIONAL_ARGS --sonarqube "${GCOVR_OUTPUT_FILE}" )
  elseif(Coverage_FORMAT STREQUAL "jacoco")
    list(APPEND GCOVR_ADDITIONAL_ARGS --jacoco "${GCOVR_OUTPUT_FILE}" )
    list(APPEND GCOVR_ADDITIONAL_ARGS --jacoco-pretty )
  elseif(Coverage_FORMAT STREQUAL "clover")
    list(APPEND GCOVR_ADDITIONAL_ARGS --clover "${GCOVR_OUTPUT_FILE}" )
    list(APPEND GCOVR_ADDITIONAL_ARGS --clover-pretty )
  elseif(Coverage_FORMAT STREQUAL "lcov")
    list(APPEND GCOVR_ADDITIONAL_ARGS --lcov "${GCOVR_OUTPUT_FILE}" )
  elseif(Coverage_FORMAT STREQUAL "json-summary")
    list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary "${GCOVR_OUTPUT_FILE}" )
    list(APPEND GCOVR_ADDITIONAL_ARGS --json-summary-pretty)
  elseif(Coverage_FORMAT STREQUAL "json-details")
    list(APPEND GCOVR_ADDITIONAL_ARGS --json "${GCOVR_OUTPUT_FILE}" )
    list(APPEND GCOVR_ADDITIONAL_ARGS --json-pretty)
  elseif(Coverage_FORMAT STREQUAL "coveralls")
    list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls "${GCOVR_OUTPUT_FILE}" )
    list(APPEND GCOVR_ADDITIONAL_ARGS --coveralls-pretty)
  elseif(Coverage_FORMAT STREQUAL "csv")
    list(APPEND GCOVR_ADDITIONAL_ARGS --csv "${GCOVR_OUTPUT_FILE}" )
  elseif(Coverage_FORMAT STREQUAL "txt")
    list(APPEND GCOVR_ADDITIONAL_ARGS --txt "${GCOVR_OUTPUT_FILE}" )
  elseif(Coverage_FORMAT STREQUAL "html-single")
    list(APPEND GCOVR_ADDITIONAL_ARGS --html "${GCOVR_OUTPUT_FILE}" )
    list(APPEND GCOVR_ADDITIONAL_ARGS --html-self-contained)
  elseif(Coverage_FORMAT STREQUAL "html-nested")
    list(APPEND GCOVR_ADDITIONAL_ARGS --html-nested "${GCOVR_OUTPUT_FILE}" )
  elseif(Coverage_FORMAT STREQUAL "html-details")
    list(APPEND GCOVR_ADDITIONAL_ARGS --html-details "${GCOVR_OUTPUT_FILE}" )
  else()
    message(FATAL_ERROR "Unsupported output style ${Coverage_FORMAT}! Aborting...")
  endif()

  # Collect excludes (CMake 3.4+: Also compute absolute paths)
  set(GCOVR_EXCLUDES "")
  foreach(EXCLUDE ${Coverage_EXCLUDE} ${COVERAGE_EXCLUDES} ${COVERAGE_GCOVR_EXCLUDES})
    if(CMAKE_VERSION VERSION_GREATER 3.4)
      get_filename_component(EXCLUDE ${EXCLUDE} ABSOLUTE BASE_DIR ${BASEDIR})
    endif()
    list(APPEND GCOVR_EXCLUDES "${EXCLUDE}")
  endforeach()
  list(REMOVE_DUPLICATES GCOVR_EXCLUDES)

  # Combine excludes to several -e arguments
  set(GCOVR_EXCLUDE_ARGS "")
  foreach(EXCLUDE ${GCOVR_EXCLUDES})
    list(APPEND GCOVR_EXCLUDE_ARGS "-e")
    list(APPEND GCOVR_EXCLUDE_ARGS "${EXCLUDE}")
  endforeach()

  # Set up commands which will be run to generate coverage data
  # Run tests
  set(GCOVR_EXEC_TESTS_CMD
      ${Coverage_EXECUTABLE} ${Coverage_EXECUTABLE_ARGS}
  )

  # Create folder
  if(DEFINED GCOVR_CREATE_FOLDER)
    set(GCOVR_FOLDER_CMD
        ${CMAKE_COMMAND} -E make_directory ${GCOVR_CREATE_FOLDER})
  else()
    set(GCOVR_FOLDER_CMD echo) # dummy
  endif()

  # Running gcovr
  set(GCOVR_CMD
      ${GCOVR_PATH}
      --gcov-executable ${GCOV_TOOL}
      --gcov-ignore-parse-errors=negative_hits.warn_once_per_file
      -r ${BASEDIR}
      ${GCOVR_ADDITIONAL_ARGS}
      ${GCOVR_EXCLUDE_ARGS}
      --object-directory=${PROJECT_BINARY_DIR}
  )

  if(CODE_COVERAGE_VERBOSE)
    message(STATUS "Executed command report")

    message(STATUS "Command to run tests: ")
    string(REPLACE ";" " " GCOVR_EXEC_TESTS_CMD_SPACED "${GCOVR_EXEC_TESTS_CMD}")
    message(STATUS "${GCOVR_EXEC_TESTS_CMD_SPACED}")

    if(NOT GCOVR_FOLDER_CMD STREQUAL "echo")
      message(STATUS "Command to create a folder: ")
      string(REPLACE ";" " " GCOVR_FOLDER_CMD_SPACED "${GCOVR_FOLDER_CMD}")
      message(STATUS "${GCOVR_FOLDER_CMD_SPACED}")
    endif()

    message(STATUS "Command to generate gcovr coverage data: ")
    string(REPLACE ";" " " GCOVR_CMD_SPACED "${GCOVR_CMD}")
    message(STATUS "${GCOVR_CMD_SPACED}")
  endif()

  add_custom_target(${Coverage_NAME}
      COMMAND ${GCOVR_EXEC_TESTS_CMD}
      COMMAND ${GCOVR_FOLDER_CMD}
      COMMAND ${GCOVR_CMD}

      BYPRODUCTS ${GCOVR_OUTPUT_FILE}
      WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
      DEPENDS ${Coverage_DEPENDENCIES}
      VERBATIM # Protect arguments to commands
      COMMENT "Running gcovr to produce code coverage report."
  )

  # Show info where to find the report
  add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
      COMMAND ;
      COMMENT "Code coverage report saved in ${GCOVR_OUTPUT_FILE} formatted as ${Coverage_FORMAT}"
  )
endfunction() # setup_target_for_coverage_gcovr

function(append_coverage_compiler_flags)
  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
  set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} ${COVERAGE_COMPILER_FLAGS}" PARENT_SCOPE)
  message(STATUS "Appending code coverage compiler flags: ${COVERAGE_COMPILER_FLAGS}")
endfunction() # append_coverage_compiler_flags

# Setup coverage for specific library
function(append_coverage_compiler_flags_to_target name)
  separate_arguments(_flag_list NATIVE_COMMAND "${COVERAGE_COMPILER_FLAGS}")
  target_compile_options(${name} PRIVATE ${_flag_list})
  if(CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
    target_link_libraries(${name} PRIVATE gcov)
  endif()
endfunction()

@@ -1,56 +0,0 @@
include (CMakeFindDependencyMacro)
# need to represent system dependencies of the lib here
#[=========================================================[
   Boost
#]=========================================================]
if (static OR APPLE OR MSVC)
  set (Boost_USE_STATIC_LIBS ON)
endif ()
set (Boost_USE_MULTITHREADED ON)
if (static OR MSVC)
  set (Boost_USE_STATIC_RUNTIME ON)
else ()
  set (Boost_USE_STATIC_RUNTIME OFF)
endif ()
find_dependency (Boost 1.70
  COMPONENTS
    chrono
    container
    context
    coroutine
    date_time
    filesystem
    program_options
    regex
    system
    thread)
#[=========================================================[
   OpenSSL
#]=========================================================]
if (NOT DEFINED OPENSSL_ROOT_DIR)
  if (DEFINED ENV{OPENSSL_ROOT})
    set (OPENSSL_ROOT_DIR $ENV{OPENSSL_ROOT})
  elseif (APPLE)
    find_program (homebrew brew)
    if (homebrew)
      execute_process (COMMAND ${homebrew} --prefix openssl
        OUTPUT_VARIABLE OPENSSL_ROOT_DIR
        OUTPUT_STRIP_TRAILING_WHITESPACE)
    endif ()
  endif ()
  file (TO_CMAKE_PATH "${OPENSSL_ROOT_DIR}" OPENSSL_ROOT_DIR)
endif ()

if (static OR APPLE OR MSVC)
  set (OPENSSL_USE_STATIC_LIBS ON)
endif ()
set (OPENSSL_MSVC_STATIC_RT ON)
find_dependency (OpenSSL 1.1.1 REQUIRED)
find_dependency (ZLIB)
find_dependency (date)
if (TARGET ZLIB::ZLIB)
  set_target_properties(OpenSSL::Crypto PROPERTIES
    INTERFACE_LINK_LIBRARIES ZLIB::ZLIB)
endif ()

include ("${CMAKE_CURRENT_LIST_DIR}/RippleTargets.cmake")

@@ -1,191 +0,0 @@
#[===================================================================[
   setup project-wide compiler settings
#]===================================================================]

#[=========================================================[
   TODO some/most of these common settings belong in a
   toolchain file, especially the ABI-impacting ones
#]=========================================================]
add_library (common INTERFACE)
add_library (Ripple::common ALIAS common)
# add a single global dependency on this interface lib
link_libraries (Ripple::common)
set_target_properties (common
  PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_CXX_EXTENSIONS OFF)
target_compile_definitions (common
  INTERFACE
    $<$<CONFIG:Debug>:DEBUG _DEBUG>
    $<$<AND:$<BOOL:${profile}>,$<NOT:$<BOOL:${assert}>>>:NDEBUG>)
# ^^^^ NOTE: CMAKE release builds already have NDEBUG
# defined, so no need to add it explicitly except for
# this special case of (profile ON) and (assert OFF)
# -- presumably this is because we don't want profile
# builds asserting unless asserts were specifically
# requested

if (MSVC)
  # remove existing exception flag since we set it to -EHa
  string (REGEX REPLACE "[-/]EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")

  foreach (var_
      CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
      CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE)

    # also remove dynamic runtime
    string (REGEX REPLACE "[-/]MD[d]*" " " ${var_} "${${var_}}")

    # /ZI (Edit & Continue debugging information) is incompatible with Gy-
    string (REPLACE "/ZI" "/Zi" ${var_} "${${var_}}")

    # omit debug info completely under CI (not needed)
    if (is_ci)
      string (REPLACE "/Zi" " " ${var_} "${${var_}}")
    endif ()
  endforeach ()

  target_compile_options (common
    INTERFACE
      -bigobj            # Increase object file max size
      -fp:precise        # Floating point behavior
      -Gd                # __cdecl calling convention
      -Gm-               # Minimal rebuild: disabled
      -Gy-               # Function level linking: disabled
      -MP                # Multiprocessor compilation
      -openmp-           # pragma omp: disabled
      -errorReport:none  # No error reporting to Internet
      -nologo            # Suppress startup banner
      -wd4018            # Disable signed/unsigned comparison warnings
      -wd4244            # Disable float to int possible loss of data warnings
      -wd4267            # Disable size_t to T possible loss of data warnings
      -wd4800            # Disable C4800 (int to bool performance)
      -wd4503            # Decorated name length exceeded, name was truncated
      $<$<COMPILE_LANGUAGE:CXX>:
        -EHa
        -GR
      >
      $<$<CONFIG:Release>:-Ox>
      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CONFIG:Debug>>:
        -GS
        -Zc:forScope
      >
      # static runtime
      $<$<CONFIG:Debug>:-MTd>
      $<$<NOT:$<CONFIG:Debug>>:-MT>
      $<$<BOOL:${werr}>:-WX>
    )
  target_compile_definitions (common
    INTERFACE
      _WIN32_WINNT=0x6000
      _SCL_SECURE_NO_WARNINGS
      _CRT_SECURE_NO_WARNINGS
      WIN32_CONSOLE
      WIN32_LEAN_AND_MEAN
      NOMINMAX
      # TODO: Resolve these warnings, don't just silence them
      _SILENCE_ALL_CXX17_DEPRECATION_WARNINGS
      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CONFIG:Debug>>:_CRTDBG_MAP_ALLOC>)
  target_link_libraries (common
    INTERFACE
      -errorreport:none
      -machine:X64)
else ()
  # HACK : because these need to come first, before any warning demotion
  string (APPEND CMAKE_CXX_FLAGS " -Wall -Wdeprecated")
  if (wextra)
    string (APPEND CMAKE_CXX_FLAGS " -Wextra -Wno-unused-parameter")
  endif ()
  # not MSVC
  target_compile_options (common
    INTERFACE
      $<$<BOOL:${werr}>:-Werror>
      $<$<COMPILE_LANGUAGE:CXX>:
        -frtti
        -Wnon-virtual-dtor
      >
      -Wno-sign-compare
      -Wno-char-subscripts
      -Wno-format
      -Wno-unused-local-typedefs
      -fstack-protector
      $<$<BOOL:${is_gcc}>:
        -Wno-unused-but-set-variable
        -Wno-deprecated
      >
      $<$<NOT:$<CONFIG:Debug>>:-fno-strict-aliasing>
      # tweak gcc optimization for debug
      $<$<AND:$<BOOL:${is_gcc}>,$<CONFIG:Debug>>:-O0>
      # Add debug symbols to release config
      $<$<CONFIG:Release>:-g>)
  target_link_libraries (common
    INTERFACE
      -rdynamic
      $<$<BOOL:${is_linux}>:-Wl,-z,relro,-z,now>
      # link to static libc/c++ iff:
      #   * static option set and
      #   * NOT APPLE (AppleClang does not support static libc/c++) and
      #   * NOT san (sanitizers typically don't work with static libc/c++)
      $<$<AND:$<BOOL:${static}>,$<NOT:$<BOOL:${APPLE}>>,$<NOT:$<BOOL:${san}>>>:
        -static-libstdc++
        -static-libgcc
      >)
endif ()

if (use_mold)
  # use mold linker if available
  execute_process (
    COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=mold -Wl,--version
    ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
  if ("${LD_VERSION}" MATCHES "mold")
    target_link_libraries (common INTERFACE -fuse-ld=mold)
  endif ()
  unset (LD_VERSION)
elseif (use_gold AND is_gcc)
  # use gold linker if available
  execute_process (
    COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=gold -Wl,--version
    ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
  #[=========================================================[
     NOTE: the gold linker inserts -rpath as DT_RUNPATH by
     default instead of DT_RPATH, so you might have slightly
     unexpected runtime ld behavior if you were expecting
     DT_RPATH. Specify --disable-new-dtags to gold if you do
     not want the default DT_RUNPATH behavior. This rpath
     treatment as well as static/dynamic selection means that
     gold does not currently have ideal default behavior when
     we are using jemalloc. Thus for simplicity we don't use
     it when jemalloc is requested. An alternative to
     disabling would be to figure out all the settings
     required to make gold play nicely with jemalloc.
  #]=========================================================]
  if (("${LD_VERSION}" MATCHES "GNU gold") AND (NOT jemalloc))
    target_link_libraries (common
      INTERFACE
        -fuse-ld=gold
        -Wl,--no-as-needed
        #[=========================================================[
           see https://bugs.launchpad.net/ubuntu/+source/eglibc/+bug/1253638/comments/5
           DT_RUNPATH does not work great for transitive
           dependencies (of which boost has a few) - so just
           switch to DT_RPATH if doing dynamic linking with gold
        #]=========================================================]
        $<$<NOT:$<BOOL:${static}>>:-Wl,--disable-new-dtags>)
  endif ()
  unset (LD_VERSION)
elseif (use_lld)
  # use lld linker if available
  execute_process (
    COMMAND ${CMAKE_CXX_COMPILER} -fuse-ld=lld -Wl,--version
    ERROR_QUIET OUTPUT_VARIABLE LD_VERSION)
  if ("${LD_VERSION}" MATCHES "LLD")
    target_link_libraries (common INTERFACE -fuse-ld=lld)
  endif ()
  unset (LD_VERSION)
endif()

if (assert)
  foreach (var_ CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELEASE)
    STRING (REGEX REPLACE "[-/]DNDEBUG" "" ${var_} "${${var_}}")
  endforeach ()
endif ()

File diff suppressed because it is too large
@@ -1,38 +0,0 @@
#[===================================================================[
   coverage report target
#]===================================================================]

if(NOT coverage)
  message(FATAL_ERROR "Code coverage not enabled! Aborting ...")
endif()

if(CMAKE_CXX_COMPILER_ID MATCHES "MSVC")
  message(WARNING "Code coverage on Windows is not supported, ignoring 'coverage' flag")
  return()
endif()

include(CodeCoverage)

# The instructions for these commands come from the `CodeCoverage` module,
# which was copied from https://github.com/bilke/cmake-modules, commit fb7d2a3,
# then locally changed (see CHANGES: section in `CodeCoverage.cmake`)

set(GCOVR_ADDITIONAL_ARGS ${coverage_extra_args})
if(NOT GCOVR_ADDITIONAL_ARGS STREQUAL "")
  separate_arguments(GCOVR_ADDITIONAL_ARGS)
endif()

list(APPEND GCOVR_ADDITIONAL_ARGS
  --exclude-throw-branches
  --exclude-noncode-lines
  --exclude-unreachable-branches -s
  -j ${coverage_test_parallelism})

setup_target_for_coverage_gcovr(
  NAME coverage
  FORMAT ${coverage_format}
  EXECUTABLE rippled
  EXECUTABLE_ARGS --unittest$<$<BOOL:${coverage_test}>:=${coverage_test}> --unittest-jobs ${coverage_test_parallelism} --quiet --unittest-log
  EXCLUDE "src/test" "${CMAKE_BINARY_DIR}/proto_gen" "${CMAKE_BINARY_DIR}/proto_gen_grpc"
  DEPENDENCIES rippled
)

@@ -1,84 +0,0 @@
#[===================================================================[
   docs target (optional)
#]===================================================================]

option(with_docs "Include the docs target?" FALSE)

if(NOT (with_docs OR only_docs))
  return()
endif()

find_package(Doxygen)
if(NOT TARGET Doxygen::doxygen)
  message(STATUS "doxygen executable not found -- skipping docs target")
  return()
endif()

set(doxygen_output_directory "${CMAKE_BINARY_DIR}/docs")
set(doxygen_include_path "${CMAKE_CURRENT_SOURCE_DIR}/src")
set(doxygen_index_file "${doxygen_output_directory}/html/index.html")
set(doxyfile "${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile")

file(GLOB_RECURSE doxygen_input
  docs/*.md
  src/ripple/*.h
  src/ripple/*.cpp
  src/ripple/*.md
  src/test/*.h
  src/test/*.md
  Builds/*/README.md)
list(APPEND doxygen_input
  README.md
  RELEASENOTES.md
  src/README.md)
set(dependencies "${doxygen_input}" "${doxyfile}")

function(verbose_find_path variable name)
  # find_path sets a CACHE variable, so don't try using a "local" variable.
  find_path(${variable} "${name}" ${ARGN})
  if(NOT ${variable})
    message(NOTICE "could not find ${name}")
  else()
    message(STATUS "found ${name}: ${${variable}}/${name}")
  endif()
endfunction()

verbose_find_path(doxygen_plantuml_jar_path plantuml.jar PATH_SUFFIXES share/plantuml)
verbose_find_path(doxygen_dot_path dot)

# https://en.cppreference.com/w/Cppreference:Archives
# https://stackoverflow.com/questions/60822559/how-to-move-a-file-download-from-configure-step-to-build-step
set(download_script "${CMAKE_BINARY_DIR}/docs/download-cppreference.cmake")
file(WRITE
  "${download_script}"
  "file(DOWNLOAD \
    http://upload.cppreference.com/mwiki/images/b/b2/html_book_20190607.zip \
    ${CMAKE_BINARY_DIR}/docs/cppreference.zip \
    EXPECTED_HASH MD5=82b3a612d7d35a83e3cb1195a63689ab \
  )\n \
  execute_process( \
    COMMAND \"${CMAKE_COMMAND}\" -E tar -xf cppreference.zip \
  )\n"
)
set(tagfile "${CMAKE_BINARY_DIR}/docs/cppreference-doxygen-web.tag.xml")
add_custom_command(
  OUTPUT "${tagfile}"
  COMMAND "${CMAKE_COMMAND}" -P "${download_script}"
  WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/docs"
)
set(doxygen_tagfiles "${tagfile}=http://en.cppreference.com/w/")

add_custom_command(
  OUTPUT "${doxygen_index_file}"
  COMMAND "${CMAKE_COMMAND}" -E env
    "DOXYGEN_OUTPUT_DIRECTORY=${doxygen_output_directory}"
    "DOXYGEN_INCLUDE_PATH=${doxygen_include_path}"
    "DOXYGEN_TAGFILES=${doxygen_tagfiles}"
    "DOXYGEN_PLANTUML_JAR_PATH=${doxygen_plantuml_jar_path}"
    "DOXYGEN_DOT_PATH=${doxygen_dot_path}"
    "${DOXYGEN_EXECUTABLE}" "${doxyfile}"
  WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}"
  DEPENDS "${dependencies}" "${tagfile}")
add_custom_target(docs
  DEPENDS "${doxygen_index_file}"
  SOURCES "${dependencies}")

@@ -1,51 +0,0 @@
#[===================================================================[
   install stuff
#]===================================================================]

install (
  TARGETS
    common
    opts
    ripple_syslibs
    ripple_boost
    xrpl_core
    xrpl.libpb
  EXPORT RippleExports
  LIBRARY DESTINATION lib
  ARCHIVE DESTINATION lib
  RUNTIME DESTINATION bin
  INCLUDES DESTINATION include)

install (EXPORT RippleExports
  FILE RippleTargets.cmake
  NAMESPACE Ripple::
  DESTINATION lib/cmake/ripple)
include (CMakePackageConfigHelpers)
write_basic_package_version_file (
  RippleConfigVersion.cmake
  VERSION ${rippled_version}
  COMPATIBILITY SameMajorVersion)

if (is_root_project)
  install (TARGETS rippled RUNTIME DESTINATION bin)
  set_target_properties(rippled PROPERTIES INSTALL_RPATH_USE_LINK_PATH ON)
  install (
    FILES
      ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/RippleConfig.cmake
      ${CMAKE_CURRENT_BINARY_DIR}/RippleConfigVersion.cmake
    DESTINATION lib/cmake/ripple)
  # sample configs should not overwrite existing files
  # install if-not-exists workaround as suggested by
  # https://cmake.org/Bug/view.php?id=12646
  install(CODE "
    macro (copy_if_not_exists SRC DEST NEWNAME)
      if (NOT EXISTS \"\$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/\${DEST}/\${NEWNAME}\")
        file (INSTALL FILE_PERMISSIONS OWNER_READ OWNER_WRITE DESTINATION \"\${CMAKE_INSTALL_PREFIX}/\${DEST}\" FILES \"\${SRC}\" RENAME \"\${NEWNAME}\")
      else ()
        message (\"-- Skipping : \$ENV{DESTDIR}\${CMAKE_INSTALL_PREFIX}/\${DEST}/\${NEWNAME}\")
      endif ()
    endmacro()
    copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/rippled-example.cfg\" etc rippled.cfg)
    copy_if_not_exists(\"${CMAKE_CURRENT_SOURCE_DIR}/cfg/validators-example.txt\" etc validators.txt)
  ")
endif ()

@@ -1,96 +0,0 @@
#[===================================================================[
   rippled compile options/settings via an interface library
#]===================================================================]

add_library (opts INTERFACE)
add_library (Ripple::opts ALIAS opts)
target_compile_definitions (opts
  INTERFACE
    BOOST_ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS
    $<$<BOOL:${boost_show_deprecated}>:
      BOOST_ASIO_NO_DEPRECATED
      BOOST_FILESYSTEM_NO_DEPRECATED
    >
    $<$<NOT:$<BOOL:${boost_show_deprecated}>>:
      BOOST_COROUTINES_NO_DEPRECATION_WARNING
      BOOST_BEAST_ALLOW_DEPRECATED
      BOOST_FILESYSTEM_DEPRECATED
    >
    $<$<BOOL:${beast_no_unit_test_inline}>:BEAST_NO_UNIT_TEST_INLINE=1>
    $<$<BOOL:${beast_disable_autolink}>:BEAST_DONT_AUTOLINK_TO_WIN32_LIBRARIES=1>
    $<$<BOOL:${single_io_service_thread}>:RIPPLE_SINGLE_IO_SERVICE_THREAD=1>)
target_compile_options (opts
  INTERFACE
    $<$<AND:$<BOOL:${is_gcc}>,$<COMPILE_LANGUAGE:CXX>>:-Wsuggest-override>
    $<$<BOOL:${perf}>:-fno-omit-frame-pointer>
    $<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
    $<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
    $<$<BOOL:${profile}>:-pg>
    $<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)

target_link_libraries (opts
  INTERFACE
    $<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${coverage}>>:-g --coverage -fprofile-abs-path>
    $<$<AND:$<BOOL:${is_clang}>,$<BOOL:${coverage}>>:-g --coverage>
    $<$<BOOL:${profile}>:-pg>
    $<$<AND:$<BOOL:${is_gcc}>,$<BOOL:${profile}>>:-p>)

if(jemalloc)
  find_package(jemalloc REQUIRED)
  target_compile_definitions(opts INTERFACE PROFILE_JEMALLOC)
  target_link_libraries(opts INTERFACE jemalloc::jemalloc)
endif ()

if (san)
  target_compile_options (opts
    INTERFACE
      # sanitizers recommend minimum of -O1 for reasonable performance
      $<$<CONFIG:Debug>:-O1>
      ${SAN_FLAG}
      -fno-omit-frame-pointer)
  target_compile_definitions (opts
    INTERFACE
      $<$<STREQUAL:${san},address>:SANITIZER=ASAN>
      $<$<STREQUAL:${san},thread>:SANITIZER=TSAN>
      $<$<STREQUAL:${san},memory>:SANITIZER=MSAN>
      $<$<STREQUAL:${san},undefined>:SANITIZER=UBSAN>)
  target_link_libraries (opts INTERFACE ${SAN_FLAG} ${SAN_LIB})
endif ()

#[===================================================================[
   rippled transitive library deps via an interface library
#]===================================================================]

add_library (ripple_syslibs INTERFACE)
add_library (Ripple::syslibs ALIAS ripple_syslibs)
target_link_libraries (ripple_syslibs
  INTERFACE
    $<$<BOOL:${MSVC}>:
      legacy_stdio_definitions.lib
      Shlwapi
      kernel32
      user32
      gdi32
      winspool
      comdlg32
      advapi32
      shell32
      ole32
      oleaut32
      uuid
      odbc32
      odbccp32
      crypt32
    >
    $<$<NOT:$<BOOL:${MSVC}>>:dl>
    $<$<NOT:$<OR:$<BOOL:${MSVC}>,$<BOOL:${APPLE}>>>:rt>)

if (NOT MSVC)
  set (THREADS_PREFER_PTHREAD_FLAG ON)
  find_package (Threads)
  target_link_libraries (ripple_syslibs INTERFACE Threads::Threads)
endif ()

add_library (ripple_libs INTERFACE)
add_library (Ripple::libs ALIAS ripple_libs)
target_link_libraries (ripple_libs INTERFACE Ripple::syslibs)

@@ -1,39 +0,0 @@
#[===================================================================[
   multiconfig misc
#]===================================================================]

if (is_multiconfig)
  # This code finds all source files in the src subdirectory for inclusion
  # in the IDE file tree as non-compiled sources. Since this file list will
  # have some overlap with files we have already added to our targets to
  # be compiled, we explicitly remove any of these target source files from
  # this list.
  file (GLOB_RECURSE all_sources RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}
    CONFIGURE_DEPENDS
    src/*.* Builds/*.md docs/*.md src/*.md Builds/*.cmake)
  file(GLOB md_files RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} CONFIGURE_DEPENDS
    *.md)
  LIST(APPEND all_sources ${md_files})
  foreach (_target secp256k1::secp256k1 ed25519::ed25519 xrpl_core rippled)
    get_target_property (_type ${_target} TYPE)
    if(_type STREQUAL "INTERFACE_LIBRARY")
      continue()
    endif()
    get_target_property (_src ${_target} SOURCES)
    list (REMOVE_ITEM all_sources ${_src})
  endforeach ()
  target_sources (rippled PRIVATE ${all_sources})
  set_property (
    SOURCE ${all_sources}
    APPEND
    PROPERTY HEADER_FILE_ONLY true)
  if (MSVC)
    set_property(
      DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
      PROPERTY VS_STARTUP_PROJECT rippled)
  endif ()

  group_sources(src)
  group_sources(docs)
  group_sources(Builds)
endif ()
@@ -1,80 +0,0 @@
#[===================================================================[
   convenience variables and sanity checks
#]===================================================================]

include(ProcessorCount)

if (NOT ep_procs)
  ProcessorCount(ep_procs)
  if (ep_procs GREATER 1)
    # never use more than half of cores for EP builds
    math (EXPR ep_procs "${ep_procs} / 2")
    message (STATUS "Using ${ep_procs} cores for ExternalProject builds.")
  endif ()
endif ()
get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)

set (CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" FORCE)
if (NOT is_multiconfig)
  if (NOT CMAKE_BUILD_TYPE)
    message (STATUS "Build type not specified - defaulting to Release")
    set (CMAKE_BUILD_TYPE Release CACHE STRING "build type" FORCE)
  elseif (NOT (CMAKE_BUILD_TYPE STREQUAL Debug OR CMAKE_BUILD_TYPE STREQUAL Release))
    # for simplicity, these are the only two config types we care about. Limiting
    # the build types simplifies dealing with external project builds especially
    message (FATAL_ERROR " *** Only Debug or Release build types are currently supported ***")
  endif ()
endif ()

get_directory_property(has_parent PARENT_DIRECTORY)
if (has_parent)
  set (is_root_project OFF)
else ()
  set (is_root_project ON)
endif ()

if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang") # both Clang and AppleClang
  set (is_clang TRUE)
  if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND
      CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
    message (FATAL_ERROR "This project requires clang 8 or later")
  endif ()
  # TODO min AppleClang version check ?
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
  set (is_gcc TRUE)
  if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 8.0)
    message (FATAL_ERROR "This project requires GCC 8 or later")
  endif ()
endif ()

if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
  set (is_linux TRUE)
else ()
  set (is_linux FALSE)
endif ()

if ("$ENV{CI}" STREQUAL "true" OR "$ENV{CONTINUOUS_INTEGRATION}" STREQUAL "true")
  set (is_ci TRUE)
else ()
  set (is_ci FALSE)
endif ()

# check for in-source build and fail
if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
  message (FATAL_ERROR "Builds (in-source) are not allowed in "
    "${CMAKE_CURRENT_SOURCE_DIR}. Please remove CMakeCache.txt and the CMakeFiles "
    "directory from ${CMAKE_CURRENT_SOURCE_DIR} and try building in a separate directory.")
endif ()

if (MSVC AND CMAKE_GENERATOR_PLATFORM STREQUAL "Win32")
  message (FATAL_ERROR "Visual Studio 32-bit build is not supported.")
endif ()

if (NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
  message (FATAL_ERROR "Rippled requires a 64 bit target architecture.\n"
    "The most likely cause of this warning is trying to build rippled with a 32-bit OS.")
endif ()

if (APPLE AND NOT HOMEBREW)
  find_program (HOMEBREW brew)
endif ()
@@ -1,130 +0,0 @@
#[===================================================================[
   declare user options/settings
#]===================================================================]

include(ProcessorCount)

ProcessorCount(PROCESSOR_COUNT)

option(assert "Enables asserts, even in release builds" OFF)

option(reporting "Build rippled with reporting mode enabled" OFF)

option(tests "Build tests" ON)

option(unity "Creates a build using UNITY support in cmake. This is the default" ON)
if(unity)
  if(NOT is_ci)
    set(CMAKE_UNITY_BUILD_BATCH_SIZE 15 CACHE STRING "")
  endif()
endif()
if(is_gcc OR is_clang)
  option(coverage "Generates coverage info." OFF)
  option(profile "Add profiling flags" OFF)
  set(coverage_test_parallelism "${PROCESSOR_COUNT}" CACHE STRING
    "Unit tests parallelism for the purpose of coverage report.")
  set(coverage_format "html-details" CACHE STRING
    "Output format of the coverage report.")
  set(coverage_extra_args "" CACHE STRING
    "Additional arguments to pass to gcovr.")
  set(coverage_test "" CACHE STRING
    "On gcc & clang, the specific unit test(s) to run for coverage. Default is all tests.")
  if(coverage_test AND NOT coverage)
    set(coverage ON CACHE BOOL "gcc/clang only" FORCE)
  endif()
  option(wextra "compile with extra gcc/clang warnings enabled" ON)
else()
  set(profile OFF CACHE BOOL "gcc/clang only" FORCE)
  set(coverage OFF CACHE BOOL "gcc/clang only" FORCE)
  set(wextra OFF CACHE BOOL "gcc/clang only" FORCE)
endif()
if(is_linux)
  option(BUILD_SHARED_LIBS "build shared ripple libraries" OFF)
  option(static "link protobuf, openssl, libc++, and boost statically" ON)
  option(perf "Enables flags that assist with perf recording" OFF)
  option(use_gold "enables detection of gold (binutils) linker" ON)
  option(use_mold "enables detection of mold (binutils) linker" ON)
else()
  # we are not ready to allow shared-libs on windows because it would require
  # export declarations. On macos it's more feasible, but static openssl
  # produces odd linker errors, thus we disable shared lib builds for now.
  set(BUILD_SHARED_LIBS OFF CACHE BOOL "build shared ripple libraries - OFF for win/macos" FORCE)
  set(static ON CACHE BOOL "static link, linux only. ON for WIN/macos" FORCE)
  set(perf OFF CACHE BOOL "perf flags, linux only" FORCE)
  set(use_gold OFF CACHE BOOL "gold linker, linux only" FORCE)
  set(use_mold OFF CACHE BOOL "mold linker, linux only" FORCE)
endif()
if(is_clang)
  option(use_lld "enables detection of lld linker" ON)
else()
  set(use_lld OFF CACHE BOOL "try lld linker, clang only" FORCE)
endif()
option(jemalloc "Enables jemalloc for heap profiling" OFF)
option(werr "treat warnings as errors" OFF)
option(local_protobuf
  "Force a local build of protobuf instead of looking for an installed version." OFF)
option(local_grpc
  "Force a local build of gRPC instead of looking for an installed version." OFF)

# this one is a string and therefore can't be an option
set(san "" CACHE STRING "On gcc & clang, add sanitizer instrumentation")
set_property(CACHE san PROPERTY STRINGS ";undefined;memory;address;thread")
if(san)
  string(TOLOWER ${san} san)
  set(SAN_FLAG "-fsanitize=${san}")
  set(SAN_LIB "")
  if(is_gcc)
    if(san STREQUAL "address")
      set(SAN_LIB "asan")
    elseif(san STREQUAL "thread")
      set(SAN_LIB "tsan")
    elseif(san STREQUAL "memory")
      set(SAN_LIB "msan")
    elseif(san STREQUAL "undefined")
      set(SAN_LIB "ubsan")
    endif()
  endif()
  set(_saved_CRL ${CMAKE_REQUIRED_LIBRARIES})
  set(CMAKE_REQUIRED_LIBRARIES "${SAN_FLAG};${SAN_LIB}")
  check_cxx_compiler_flag(${SAN_FLAG} COMPILER_SUPPORTS_SAN)
  set(CMAKE_REQUIRED_LIBRARIES ${_saved_CRL})
  if(NOT COMPILER_SUPPORTS_SAN)
    message(FATAL_ERROR "${san} sanitizer does not seem to be supported by your compiler")
  endif()
endif()
set(container_label "" CACHE STRING "tag to use for package building containers")
option(packages_only
  "ONLY generate package building targets. This is special use-case and almost \
certainly not what you want. Use with caution as you won't be able to build \
any compiled targets locally." OFF)
option(have_package_container
  "Sometimes you already have the tagged container you want to use for package \
building and you don't want docker to rebuild it. This flag will detach the \
dependency of the package build from the container build. It's an advanced \
use case and most likely you should not be touching this flag." OFF)

# the remaining options are obscure and rarely used
option(beast_no_unit_test_inline
  "Prevents unit test definitions from being inserted into global table"
  OFF)
option(single_io_service_thread
  "Restricts the number of threads calling io_service::run to one. \
This can be useful when debugging."
  OFF)
option(boost_show_deprecated
  "Allow boost to fail on deprecated usage. Only useful if you're trying \
to find deprecated calls."
  OFF)
option(beast_hashers
  "Use local implementations for sha/ripemd hashes (experimental, not recommended)"
  OFF)

if(WIN32)
  option(beast_disable_autolink "Disables autolinking of system libraries on WIN32" OFF)
else()
  set(beast_disable_autolink OFF CACHE BOOL "WIN32 only" FORCE)
endif()
if(coverage)
  message(STATUS "coverage build requested - forcing Debug build")
  set(CMAKE_BUILD_TYPE Debug CACHE STRING "build type" FORCE)
endif()
@@ -1,22 +0,0 @@
option (validator_keys "Enables building of validator-keys-tool as a separate target (imported via FetchContent)" OFF)

if (validator_keys)
  git_branch (current_branch)
  # default to tracking VK master branch unless we are on release
  if (NOT (current_branch STREQUAL "release"))
    set (current_branch "master")
  endif ()
  message (STATUS "tracking ValidatorKeys branch: ${current_branch}")

  FetchContent_Declare (
    validator_keys_src
    GIT_REPOSITORY https://github.com/ripple/validator-keys-tool.git
    GIT_TAG "${current_branch}"
  )
  FetchContent_GetProperties (validator_keys_src)
  if (NOT validator_keys_src_POPULATED)
    message (STATUS "Pausing to download ValidatorKeys...")
    FetchContent_Populate (validator_keys_src)
  endif ()
  add_subdirectory (${validator_keys_src_SOURCE_DIR} ${CMAKE_BINARY_DIR}/validator-keys)
endif ()
@@ -1,15 +0,0 @@
#[===================================================================[
   read version from source
#]===================================================================]

file (STRINGS src/ripple/protocol/impl/BuildInfo.cpp BUILD_INFO)
foreach (line_ ${BUILD_INFO})
  if (line_ MATCHES "versionString[ ]*=[ ]*\"(.+)\"")
    set (rippled_version ${CMAKE_MATCH_1})
  endif ()
endforeach ()
if (rippled_version)
  message (STATUS "rippled version: ${rippled_version}")
else ()
  message (FATAL_ERROR "unable to determine rippled version")
endif ()
@@ -1,53 +0,0 @@
find_package(Boost 1.82 REQUIRED
  COMPONENTS
    chrono
    container
    context
    coroutine
    date_time
    filesystem
    json
    program_options
    regex
    system
    thread
)

add_library(ripple_boost INTERFACE)
add_library(Ripple::boost ALIAS ripple_boost)
if(XCODE)
  target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
  target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/")
else()
  target_include_directories(ripple_boost SYSTEM BEFORE INTERFACE ${Boost_INCLUDE_DIRS})
endif()

target_link_libraries(ripple_boost
  INTERFACE
    Boost::boost
    Boost::chrono
    Boost::container
    Boost::coroutine
    Boost::date_time
    Boost::filesystem
    Boost::json
    Boost::program_options
    Boost::regex
    Boost::system
    Boost::thread)
if(Boost_COMPILER)
  target_link_libraries(ripple_boost INTERFACE Boost::disable_autolinking)
endif()
if(san AND is_clang)
  # TODO: gcc does not support -fsanitize-blacklist...can we do something else
  # for gcc ?
  if(NOT Boost_INCLUDE_DIRS AND TARGET Boost::headers)
    get_target_property(Boost_INCLUDE_DIRS Boost::headers INTERFACE_INCLUDE_DIRECTORIES)
  endif()
  message(STATUS "Adding [${Boost_INCLUDE_DIRS}] to sanitizer blacklist")
  file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt "src:${Boost_INCLUDE_DIRS}/*")
  target_compile_options(opts
    INTERFACE
      # ignore boost headers for sanitizing
      -fsanitize-blacklist=${CMAKE_CURRENT_BINARY_DIR}/san_bl.txt)
endif()
@@ -1,62 +0,0 @@
find_package(Protobuf REQUIRED)

# .proto files import each other like this:
#
# import "path/to/file.proto";
#
# For the protobuf compiler to find these imports,
# the parent directory of "path" must be in the import path.
#
# When generating C++,
# it turns into an include statement like this:
#
# #include "path/to/file.pb.h"
#
# and the header is generated at a path relative to the output directory
# that matches the given .proto path relative to the source directory
# minus the first matching prefix on the import path.
#
# In other words, a file `include/package/path/to/file.proto`
# with import path [`include/package`, `include`]
# will generate files `output/path/to/file.pb.{h,cc}`
# with includes like `#include "path/to/file.pb.h"`.
#
# During build, the generated files can find each other if the output
# directory is an include directory, but we want to install that directory
# under our package's include directory (`include/package`), not as a sibling.
# After install, they can find each other if that subdirectory is an include
# directory.

# Add protocol buffer sources to an existing library target.
# target:
#   The name of the library target.
# prefix:
#   The install prefix for headers relative to `CMAKE_INSTALL_INCLUDEDIR`.
#   This prefix should appear at the start of all your consumer includes.
# ARGN:
#   A list of .proto files.
function(target_protobuf_sources target prefix)
  set(dir "${CMAKE_CURRENT_BINARY_DIR}/pb-${target}")
  file(MAKE_DIRECTORY "${dir}/${prefix}")

  protobuf_generate(
    TARGET ${target}
    PROTOC_OUT_DIR "${dir}/${prefix}"
    "${ARGN}"
  )
  target_include_directories(${target} SYSTEM PUBLIC
    # Allows #include <package/path/to/file.proto> used by consumer files.
    $<BUILD_INTERFACE:${dir}>
    # Allows #include "path/to/file.proto" used by generated files.
    $<BUILD_INTERFACE:${dir}/${prefix}>
    # Allows #include <package/path/to/file.proto> used by consumer files.
    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
    # Allows #include "path/to/file.proto" used by generated files.
    $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/${prefix}>
  )
  install(
    DIRECTORY ${dir}/
    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
    FILES_MATCHING PATTERN "*.h"
  )
endfunction()
@@ -1,114 +0,0 @@
# Levelization

Levelization is the term used to describe efforts to prevent rippled from
having or creating cyclic dependencies.

rippled code is organized into directories under `src/rippled` (and
`src/test`) representing modules. The modules are intended to be
organized into "tiers" or "levels" such that a module from one level can
only include code from lower levels. Additionally, a module
in one level should never include code in an `impl` folder of any level
other than its own.

Unfortunately, over time, enforcement of levelization has been
inconsistent, so the current state of the code doesn't necessarily
reflect these rules. Whenever possible, developers should refactor any
levelization violations they find (by moving files or individual
classes). At the very least, don't make things worse.

The table below summarizes the _desired_ division of modules, based on the
state of the rippled code when it was created. The levels are numbered from
the bottom up, with the lower level, lower numbered, more independent
modules listed first, and the higher level, higher numbered modules with
more dependencies listed later.

**tl;dr:** The modules listed first are more independent than the modules
listed later.

| Level / Tier | Module(s) |
|--------------|-----------------------------------------------|
| 01 | ripple/beast ripple/unity |
| 02 | ripple/basics |
| 03 | ripple/json ripple/crypto |
| 04 | ripple/protocol |
| 05 | ripple/core ripple/conditions ripple/consensus ripple/resource ripple/server |
| 06 | ripple/peerfinder ripple/ledger ripple/nodestore ripple/net |
| 07 | ripple/shamap ripple/overlay |
| 08 | ripple/app |
| 09 | ripple/rpc |
| 10 | ripple/perflog |
| 11 | test/jtx test/beast test/csf |
| 12 | test/unit_test |
| 13 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay |
| 14 | test |
| 15 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore |
| 16 | test/rpc test/app |

(Note that `test` levelization is *much* less important and *much* less
strictly enforced than `ripple` levelization, other than the requirement
that `test` code should *never* be included in `ripple` code.)

## Validation

The [levelization.sh](levelization.sh) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked out repo, and will
do an analysis of all the `#include`s in
the rippled source. The only caveat is that it runs much slower
under Windows than in Linux. It hasn't yet been tested under macOS.
It generates many files of [results](results):

* `rawincludes.txt`: The raw dump of the `#include`s
* `paths.txt`: A second dump grouping the source module
  to the destination module, deduped, and with frequency counts.
* `includes/`: A directory where each file represents a module and
  contains a list of modules and counts that the module _includes_.
* `includedby/`: Similar to `includes/`, but the other way around. Each
  file represents a module and contains a list of modules and counts
  that _include_ the module.
* [`loops.txt`](results/loops.txt): A list of direct loops detected
  between modules as they actually exist, as opposed to how they are
  desired as described above. In a perfect repo, this file will be
  empty.
  This file is committed to the repo, and is used by the [levelization
  Github workflow](../../.github/workflows/levelization.yml) to validate
  that nothing changed.
* [`ordering.txt`](results/ordering.txt): A list showing relationships
  between modules where there are no loops as they actually exist, as
  opposed to how they are desired as described above.
  This file is committed to the repo, and is used by the [levelization
  Github workflow](../../.github/workflows/levelization.yml) to validate
  that nothing changed.
* [`levelization.yml`](../../.github/workflows/levelization.yml)
  Github Actions workflow to test that levelization loops haven't
  changed. Unfortunately, if changes are detected, it can't tell if
  they are improvements or not, so if you have resolved any issues or
  done anything else to improve levelization, run `levelization.sh`,
  and commit the updated results.

The `loops.txt` and `ordering.txt` files relate the modules
using comparison signs, which indicate the number of times each
module is included in the other.

* `A > B` means that A should probably be at a higher level than B,
  because B is included in A significantly more than A is included in B.
  These results can be included in both `loops.txt` and `ordering.txt`.
  Because `ordering.txt` only includes relationships where B is not
  included in A at all, it will only include these types of results.
* `A ~= B` means that A and B are included in each other a different
  number of times, but the values are so close that the script can't
  definitively say that one should be above the other. These results
  will only be included in `loops.txt`.
* `A == B` means that A and B include each other the same number of
  times, so the script has no clue which should be higher. These results
  will only be included in `loops.txt`.

The committed files hide the detailed values intentionally, to
prevent false alarms and merging issues, and because it's easy to
get those details locally.

1. Run `levelization.sh`
2. Grep the modules in `paths.txt`.
   * For example, if a cycle is found `A ~= B`, simply `grep -w
     A Builds/levelization/results/paths.txt | grep -w B`
@@ -1,127 +0,0 @@
#!/bin/bash

# Usage: levelization.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.

pushd $( dirname $0 )

if [ -v PS1 ]
then
    # if the shell is interactive, clean up any flotsam before analyzing
    git clean -ix
fi

rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../..
echo Raw includes:
grep -r '#include.*/.*\.h' src/ripple/ src/test/ | \
    grep -v boost | tee ${includes}
popd
pushd results

oldifs=${IFS}
IFS=:
mkdir includes
mkdir includedby
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
    level=$( echo ${file} | cut -d/ -f 2,3 )
    # If the "level" indicates a file, cut off the filename
    if [[ "${level##*.}" != "${level}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        level="$( dirname ${level} )/toplevel"
    fi
    level=$( echo ${level} | tr '/' '.' )

    includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
        cut -d/ -f 1,2 )
    if [[ "${includelevel##*.}" != "${includelevel}" ]]
    then
        # Use the "toplevel" label as a workaround for `sort`
        # inconsistencies between different utility versions
        includelevel="$( dirname ${includelevel} )/toplevel"
    fi
    includelevel=$( echo ${includelevel} | tr '/' '.' )

    if [[ "$level" != "$includelevel" ]]
    then
        echo $level $includelevel | tee -a paths.txt
    fi
done
echo Sort and dedup paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- # close fd 3
IFS=${oldifs}
unset oldifs

echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
    echo ${include} ${count} | tee -a includes/${level}
    echo ${level} ${count} | tee -a includedby/${include}
done
exec 4>&- # close fd 4

loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
    if [[ -f "$source" ]]
    then
        exec 5<"${source}" # open for input
        while read -r -u 5 include includefreq
        do
            if [[ -f $include ]]
            then
                if grep -q -w $source $include
                then
                    if grep -q -w "Loop: $include $source" "${loops}"
                    then
                        continue
                    fi
                    sourcefreq=$( grep -w $source $include | cut -d\  -f2 )
                    echo "Loop: $source $include"
                    # If the counts are close, indicate that the two modules are
                    # on the same level, though they shouldn't be
                    if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
                    then
                        echo -e " $source > $include\n"
                    elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
                    then
                        echo -e " $include > $source\n"
                    elif [[ $sourcefreq -eq $includefreq ]]
                    then
                        echo -e " $include == $source\n"
                    else
                        echo -e " $include ~= $source\n"
                    fi
                else
                    echo "$source > $include" >> "${ordering}"
                fi
            fi
        done
        exec 5>&- # close fd 5
    fi
done
exec 1>&4 # restore stdout
exec 4>&- # close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd
@@ -1,51 +0,0 @@
Loop: ripple.app ripple.core
 ripple.app > ripple.core

Loop: ripple.app ripple.ledger
 ripple.app > ripple.ledger

Loop: ripple.app ripple.net
 ripple.app > ripple.net

Loop: ripple.app ripple.nodestore
 ripple.app > ripple.nodestore

Loop: ripple.app ripple.overlay
 ripple.overlay ~= ripple.app

Loop: ripple.app ripple.peerfinder
 ripple.app > ripple.peerfinder

Loop: ripple.app ripple.rpc
 ripple.rpc > ripple.app

Loop: ripple.app ripple.shamap
 ripple.app > ripple.shamap

Loop: ripple.basics ripple.core
 ripple.core > ripple.basics

Loop: ripple.basics ripple.json
 ripple.json ~= ripple.basics

Loop: ripple.basics ripple.protocol
 ripple.protocol > ripple.basics

Loop: ripple.core ripple.net
 ripple.net > ripple.core

Loop: ripple.net ripple.rpc
 ripple.rpc > ripple.net

Loop: ripple.nodestore ripple.overlay
 ripple.overlay ~= ripple.nodestore

Loop: ripple.overlay ripple.rpc
 ripple.rpc ~= ripple.overlay

Loop: test.jtx test.toplevel
 test.toplevel > test.jtx

Loop: test.jtx test.unit_test
 test.unit_test == test.jtx
@@ -1,229 +0,0 @@
ripple.app > ripple.basics
ripple.app > ripple.beast
ripple.app > ripple.conditions
ripple.app > ripple.consensus
ripple.app > ripple.crypto
ripple.app > ripple.json
ripple.app > ripple.protocol
ripple.app > ripple.resource
ripple.app > test.unit_test
ripple.basics > ripple.beast
ripple.conditions > ripple.basics
ripple.conditions > ripple.protocol
ripple.consensus > ripple.basics
ripple.consensus > ripple.beast
ripple.consensus > ripple.json
ripple.consensus > ripple.protocol
ripple.core > ripple.beast
ripple.core > ripple.json
ripple.core > ripple.protocol
ripple.crypto > ripple.basics
ripple.json > ripple.beast
ripple.ledger > ripple.basics
ripple.ledger > ripple.beast
ripple.ledger > ripple.core
ripple.ledger > ripple.json
ripple.ledger > ripple.protocol
ripple.net > ripple.basics
ripple.net > ripple.beast
ripple.net > ripple.json
ripple.net > ripple.protocol
ripple.net > ripple.resource
ripple.nodestore > ripple.basics
ripple.nodestore > ripple.beast
ripple.nodestore > ripple.core
ripple.nodestore > ripple.json
ripple.nodestore > ripple.protocol
ripple.nodestore > ripple.unity
ripple.overlay > ripple.basics
ripple.overlay > ripple.beast
ripple.overlay > ripple.core
ripple.overlay > ripple.json
ripple.overlay > ripple.peerfinder
ripple.overlay > ripple.protocol
ripple.overlay > ripple.resource
ripple.overlay > ripple.server
ripple.peerfinder > ripple.basics
ripple.peerfinder > ripple.beast
ripple.peerfinder > ripple.core
ripple.peerfinder > ripple.protocol
ripple.perflog > ripple.basics
ripple.perflog > ripple.beast
ripple.perflog > ripple.core
ripple.perflog > ripple.json
ripple.perflog > ripple.nodestore
ripple.perflog > ripple.protocol
ripple.perflog > ripple.rpc
ripple.protocol > ripple.beast
ripple.protocol > ripple.crypto
ripple.protocol > ripple.json
ripple.resource > ripple.basics
ripple.resource > ripple.beast
ripple.resource > ripple.json
ripple.resource > ripple.protocol
ripple.rpc > ripple.basics
ripple.rpc > ripple.beast
ripple.rpc > ripple.core
ripple.rpc > ripple.crypto
ripple.rpc > ripple.json
ripple.rpc > ripple.ledger
ripple.rpc > ripple.nodestore
ripple.rpc > ripple.protocol
ripple.rpc > ripple.resource
ripple.rpc > ripple.server
ripple.rpc > ripple.shamap
ripple.server > ripple.basics
ripple.server > ripple.beast
ripple.server > ripple.crypto
ripple.server > ripple.json
ripple.server > ripple.protocol
ripple.shamap > ripple.basics
ripple.shamap > ripple.beast
ripple.shamap > ripple.crypto
ripple.shamap > ripple.nodestore
ripple.shamap > ripple.protocol
test.app > ripple.app
test.app > ripple.basics
test.app > ripple.beast
test.app > ripple.core
test.app > ripple.json
test.app > ripple.ledger
test.app > ripple.overlay
test.app > ripple.protocol
test.app > ripple.resource
test.app > ripple.rpc
test.app > test.jtx
test.app > test.rpc
test.app > test.toplevel
test.app > test.unit_test
test.basics > ripple.basics
test.basics > ripple.beast
test.basics > ripple.json
test.basics > ripple.protocol
test.basics > ripple.rpc
test.basics > test.jtx
test.basics > test.unit_test
test.beast > ripple.basics
test.beast > ripple.beast
test.conditions > ripple.basics
test.conditions > ripple.beast
test.conditions > ripple.conditions
test.consensus > ripple.app
test.consensus > ripple.basics
test.consensus > ripple.beast
test.consensus > ripple.consensus
test.consensus > ripple.ledger
test.consensus > test.csf
test.consensus > test.toplevel
test.consensus > test.unit_test
test.core > ripple.basics
test.core > ripple.beast
test.core > ripple.core
test.core > ripple.crypto
test.core > ripple.json
test.core > ripple.server
test.core > test.jtx
test.core > test.toplevel
test.core > test.unit_test
test.csf > ripple.basics
test.csf > ripple.beast
test.csf > ripple.consensus
test.csf > ripple.json
test.csf > ripple.protocol
test.json > ripple.beast
test.json > ripple.json
test.json > test.jtx
test.jtx > ripple.app
test.jtx > ripple.basics
test.jtx > ripple.beast
test.jtx > ripple.consensus
test.jtx > ripple.core
test.jtx > ripple.json
test.jtx > ripple.ledger
test.jtx > ripple.net
test.jtx > ripple.protocol
test.jtx > ripple.resource
test.jtx > ripple.rpc
test.jtx > ripple.server
test.ledger > ripple.app
test.ledger > ripple.basics
test.ledger > ripple.beast
test.ledger > ripple.core
test.ledger > ripple.ledger
test.ledger > ripple.protocol
test.ledger > test.jtx
test.ledger > test.toplevel
test.net > ripple.net
test.net > test.jtx
test.net > test.toplevel
test.net > test.unit_test
test.nodestore > ripple.app
test.nodestore > ripple.basics
test.nodestore > ripple.beast
test.nodestore > ripple.core
test.nodestore > ripple.nodestore
test.nodestore > ripple.protocol
test.nodestore > ripple.unity
test.nodestore > test.jtx
test.nodestore > test.toplevel
test.nodestore > test.unit_test
test.overlay > ripple.app
test.overlay > ripple.basics
test.overlay > ripple.beast
test.overlay > ripple.overlay
test.overlay > ripple.peerfinder
test.overlay > ripple.protocol
test.overlay > ripple.shamap
test.overlay > test.jtx
test.overlay > test.unit_test
test.peerfinder > ripple.basics
test.peerfinder > ripple.beast
test.peerfinder > ripple.core
test.peerfinder > ripple.peerfinder
test.peerfinder > ripple.protocol
test.peerfinder > test.beast
test.peerfinder > test.unit_test
test.protocol > ripple.basics
test.protocol > ripple.beast
test.protocol > ripple.crypto
test.protocol > ripple.json
test.protocol > ripple.protocol
test.protocol > test.toplevel
test.resource > ripple.basics
test.resource > ripple.beast
test.resource > ripple.resource
test.resource > test.unit_test
test.rpc > ripple.app
test.rpc > ripple.basics
test.rpc > ripple.beast
test.rpc > ripple.core
test.rpc > ripple.json
test.rpc > ripple.net
test.rpc > ripple.nodestore
test.rpc > ripple.overlay
test.rpc > ripple.protocol
test.rpc > ripple.resource
test.rpc > ripple.rpc
test.rpc > test.jtx
test.rpc > test.nodestore
test.rpc > test.toplevel
test.server > ripple.app
test.server > ripple.basics
test.server > ripple.beast
test.server > ripple.core
test.server > ripple.json
test.server > ripple.rpc
test.server > ripple.server
test.server > test.jtx
test.server > test.toplevel
test.server > test.unit_test
test.shamap > ripple.basics
test.shamap > ripple.beast
test.shamap > ripple.nodestore
test.shamap > ripple.protocol
test.shamap > ripple.shamap
test.shamap > test.unit_test
test.toplevel > ripple.json
test.toplevel > test.csf
test.unit_test > ripple.basics
test.unit_test > ripple.beast
CMakeLists.txt (212 lines changed)
@@ -1,137 +1,147 @@
cmake_minimum_required(VERSION 3.16)

if(POLICY CMP0074)
  cmake_policy(SET CMP0074 NEW)
endif()
if(POLICY CMP0077)
  cmake_policy(SET CMP0077 NEW)
endif()
if (POLICY CMP0074)
  cmake_policy(SET CMP0074 NEW)
endif ()
if (POLICY CMP0077)
  cmake_policy(SET CMP0077 NEW)
endif ()

# Fix "unrecognized escape" issues when passing CMAKE_MODULE_PATH on Windows.
file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake")
if (DEFINED CMAKE_MODULE_PATH)
  file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH)
endif ()
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")

project(rippled)
project(xrpl)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

include(CompilationEnv)

if (is_gcc)
  # GCC-specific fixes
  add_compile_options(-Wno-unknown-pragmas -Wno-subobject-linkage)
  # -Wno-subobject-linkage can be removed when we upgrade GCC version to at least 13.3
elseif (is_clang)
  # Clang-specific fixes
  add_compile_options(-Wno-unknown-warning-option) # Ignore unknown warning options
elseif (is_msvc)
  # MSVC-specific fixes
  add_compile_options(/wd4068) # Ignore unknown pragmas
endif ()

# Enable ccache to speed up builds.
include(Ccache)

# make GIT_COMMIT_HASH define available to all sources
find_package(Git)
if(Git_FOUND)
  execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git describe --always --abbrev=40
    OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
  if(gch)
if (Git_FOUND)
  execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse HEAD
    OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
  if (gch)
    set(GIT_COMMIT_HASH "${gch}")
    message(STATUS gch: ${GIT_COMMIT_HASH})
    add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
  endif()
endif() #git
  endif ()

if(thread_safety_analysis)
  add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS)
  add_compile_options("-stdlib=libc++")
  add_link_options("-stdlib=libc++")
endif()
  execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse --abbrev-ref HEAD
    OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gb)
  if (gb)
    set(GIT_BRANCH "${gb}")
    message(STATUS gb: ${GIT_BRANCH})
    add_definitions(-DGIT_BRANCH="${GIT_BRANCH}")
  endif ()
endif () # git

include (CheckCXXCompilerFlag)
include (FetchContent)
include (ExternalProject)
include (CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP
if (thread_safety_analysis)
  add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS
    -DXRPL_ENABLE_THREAD_SAFETY_ANNOTATIONS)
  add_compile_options("-stdlib=libc++")
  add_link_options("-stdlib=libc++")
endif ()

include(CheckCXXCompilerFlag)
include(FetchContent)
include(ExternalProject)
include(CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP
if (target)
  message (FATAL_ERROR "The target option has been removed - use native cmake options to control build")
  message(FATAL_ERROR "The target option has been removed - use native cmake options to control build")
endif ()

include(RippledSanity)
include(RippledVersion)
include(RippledSettings)
# this check has to remain in the top-level cmake
# because of the early return statement
include(XrplSanity)
include(XrplVersion)
include(XrplSettings)
# this check has to remain in the top-level cmake because of the early return statement
if (packages_only)
  if (NOT TARGET rpm)
    message (FATAL_ERROR "packages_only requested, but targets were not created - is docker installed?")
  endif()
  return ()
  if (NOT TARGET rpm)
    message(FATAL_ERROR "packages_only requested, but targets were not created - is docker installed?")
  endif ()
  return()
endif ()
include(RippledCompiler)
include(RippledInterface)
include(XrplCompiler)
include(XrplSanitizers)
include(XrplInterface)

option(only_docs "Include only the docs target?" FALSE)
include(RippledDocs)
if(only_docs)
  return()
endif()

###
include(XrplDocs)
if (only_docs)
  return()
endif ()

include(deps/Boost)
find_package(OpenSSL 1.1.1 REQUIRED)
set_target_properties(OpenSSL::SSL PROPERTIES
  INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)
set(SECP256K1_INSTALL TRUE)
add_subdirectory(external/secp256k1)
add_library(secp256k1::secp256k1 ALIAS secp256k1)
add_subdirectory(external/ed25519-donna)

add_subdirectory(external/antithesis-sdk)
find_package(date REQUIRED)
find_package(ed25519 REQUIRED)
find_package(gRPC REQUIRED)
find_package(lz4 REQUIRED)
# Target names with :: are not allowed in a generator expression.
# We need to pull the include directories and imported location properties
# from separate targets.
find_package(LibArchive REQUIRED)
find_package(lz4 REQUIRED)
find_package(nudb REQUIRED)
find_package(OpenSSL REQUIRED)
find_package(secp256k1 REQUIRED)
find_package(SOCI REQUIRED)
find_package(SQLite3 REQUIRED)
find_package(Snappy REQUIRED)

option(rocksdb "Enable RocksDB" ON)
if(rocksdb)
  find_package(RocksDB REQUIRED)
  set_target_properties(RocksDB::rocksdb PROPERTIES
    INTERFACE_COMPILE_DEFINITIONS RIPPLE_ROCKSDB_AVAILABLE=1
  )
  target_link_libraries(ripple_libs INTERFACE RocksDB::rocksdb)
endif()

find_package(nudb REQUIRED)
find_package(date REQUIRED)
find_package(xxHash REQUIRED)

target_link_libraries(ripple_libs INTERFACE
  ed25519::ed25519
  lz4::lz4
  OpenSSL::Crypto
  OpenSSL::SSL
  secp256k1::secp256k1
  soci::soci
  SQLite::SQLite3
)
target_link_libraries(
  xrpl_libs
  INTERFACE ed25519::ed25519
            lz4::lz4
            OpenSSL::Crypto
            OpenSSL::SSL
            secp256k1::secp256k1
            soci::soci
            SQLite::SQLite3)

option(rocksdb "Enable RocksDB" ON)
if (rocksdb)
  find_package(RocksDB REQUIRED)
  set_target_properties(RocksDB::rocksdb PROPERTIES INTERFACE_COMPILE_DEFINITIONS XRPL_ROCKSDB_AVAILABLE=1)
  target_link_libraries(xrpl_libs INTERFACE RocksDB::rocksdb)
endif ()

# Work around changes to Conan recipe for now.
if(TARGET nudb::core)
  set(nudb nudb::core)
elseif(TARGET NuDB::nudb)
  set(nudb NuDB::nudb)
else()
  message(FATAL_ERROR "unknown nudb target")
endif()
target_link_libraries(ripple_libs INTERFACE ${nudb})
if (TARGET nudb::core)
  set(nudb nudb::core)
elseif (TARGET NuDB::nudb)
  set(nudb NuDB::nudb)
else ()
  message(FATAL_ERROR "unknown nudb target")
endif ()
target_link_libraries(xrpl_libs INTERFACE ${nudb})

if(reporting)
  find_package(cassandra-cpp-driver REQUIRED)
  find_package(PostgreSQL REQUIRED)
  target_link_libraries(ripple_libs INTERFACE
    cassandra-cpp-driver::cassandra-cpp-driver
    PostgreSQL::PostgreSQL
  )
endif()
if (coverage)
  include(XrplCov)
endif ()

if(coverage)
  include(RippledCov)
endif()
set(PROJECT_EXPORT_SET XrplExports)
include(XrplCore)
include(XrplInstall)
include(XrplValidatorKeys)

###

include(RippledCore)
include(RippledInstall)
include(RippledMultiConfig)
include(RippledValidatorKeys)
if (tests)
  include(CTest)
  add_subdirectory(src/tests/libxrpl)
endif ()
CONTRIBUTING.md (981 lines changed)
File diff suppressed because it is too large
@@ -1,7 +1,7 @@
ISC License

Copyright (c) 2011, Arthur Britto, David Schwartz, Jed McCaleb, Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant.
Copyright (c) 2012-2020, the XRP Ledger developers.
Copyright (c) 2012-2025, the XRP Ledger developers.

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
@@ -14,4 +14,3 @@ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
OpenTelemetryPlan/00-tracing-fundamentals.md (new file, 239 lines)
@@ -0,0 +1,239 @@
# Distributed Tracing Fundamentals

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Next**: [Architecture Analysis](./01-architecture-analysis.md)

---

## What is Distributed Tracing?

Distributed tracing is a method for tracking requests and data objects as they flow through distributed systems. In a network like the XRP Ledger, a single transaction touches multiple independent nodes, each with no shared memory or logging. Distributed tracing connects these dots.

**Without tracing:** You see isolated logs on each node with no way to correlate them.

**With tracing:** You see the complete journey of a transaction or an event across all nodes it touched.

---

## Core Concepts

### 1. Trace

A **trace** represents the entire journey of a request through the system. It has a unique `trace_id` that stays constant across all nodes.

```
Trace ID: abc123
  ├── Node A: received transaction
  ├── Node B: relayed transaction
  ├── Node C: included in consensus
  └── Node D: applied to ledger
```

### 2. Span

A **span** represents a single unit of work within a trace. Each span has:

| Attribute        | Description                                  | Example                    |
| ---------------- | -------------------------------------------- | -------------------------- |
| `trace_id`       | Links to parent trace                        | `abc123`                   |
| `span_id`        | Unique identifier                            | `span456`                  |
| `parent_span_id` | Parent span (if any)                         | `p_span123`                |
| `name`           | Operation name                               | `rpc.submit`               |
| `start_time`     | When work began                              | `2024-01-15T10:30:00Z`     |
| `end_time`       | When work completed                          | `2024-01-15T10:30:00.050Z` |
| `attributes`     | Key-value metadata                           | `tx.hash=ABC...`           |
| `status`         | Status code (`OK`/`ERROR`), optional message | `OK`                       |

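To make these fields concrete, here is a minimal sketch of creating and ending one span with the opentelemetry-cpp API. The tracer name `rippled`, the span name, and the attribute values are illustrative assumptions, not existing instrumentation:

```cpp
#include <opentelemetry/trace/provider.h>

// Sketch only: obtain a tracer and record a single span. The SDK
// assigns trace_id/span_id; we supply name, attributes, and status.
void submitWithSpan()
{
    auto provider = opentelemetry::trace::Provider::GetTracerProvider();
    auto tracer = provider->GetTracer("rippled");   // tracer name is illustrative
    auto span = tracer->StartSpan("rpc.submit");    // start_time recorded here
    span->SetAttribute("tx.hash", "ABC...");        // key-value metadata
    span->SetStatus(opentelemetry::trace::StatusCode::kOk, "");
    span->End();                                    // end_time recorded here
}
```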
### 3. Trace Context

**Trace context** is the data that propagates between services to link spans together. It contains:

- `trace_id` - The trace this span belongs to
- `span_id` - The current span (becomes parent for child spans)
- `trace_flags` - Sampling decisions

---

## How Spans Form a Trace

Spans have parent-child relationships forming a tree structure:

```mermaid
flowchart TB
    subgraph trace["Trace: abc123"]
        A["tx.submit<br/>span_id: 001<br/>50ms"] --> B["tx.validate<br/>span_id: 002<br/>5ms"]
        A --> C["tx.relay<br/>span_id: 003<br/>10ms"]
        A --> D["tx.apply<br/>span_id: 004<br/>30ms"]
        D --> E["ledger.update<br/>span_id: 005<br/>20ms"]
    end

    style A fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style B fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style C fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style D fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style E fill:#bf360c,stroke:#8c2809,color:#ffffff
```

The same trace visualized as a **timeline (Gantt chart)**:

```
Time →     0ms   10ms  20ms  30ms  40ms  50ms
           ├───────────────────────────────────────────┤
tx.submit  │▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓│
           ├─────┤
tx.valid   │▓▓▓▓▓│
                 ├──────────┤
tx.relay         │▓▓▓▓▓▓▓▓▓▓│
                 ├────────────────────────────┤
tx.apply         │▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓│
                           ├──────────────────┤
ledger                     │▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓│
```

---

## Distributed Traces Across Nodes

In distributed systems like rippled, traces span **multiple independent nodes**. The trace context must be propagated in network messages:

```mermaid
sequenceDiagram
    participant Client
    participant NodeA as Node A
    participant NodeB as Node B
    participant NodeC as Node C

    Client->>NodeA: Submit TX<br/>(no trace context)

    Note over NodeA: Creates new trace<br/>trace_id: abc123<br/>span: tx.receive

    NodeA->>NodeB: Relay TX<br/>(trace_id: abc123, parent: 001)

    Note over NodeB: Creates child span<br/>span: tx.relay<br/>parent_span_id: 001

    NodeA->>NodeC: Relay TX<br/>(trace_id: abc123, parent: 001)

    Note over NodeC: Creates child span<br/>span: tx.relay<br/>parent_span_id: 001

    Note over NodeA,NodeC: All spans share trace_id: abc123<br/>enabling correlation across nodes
```

---

## Context Propagation

For traces to work across nodes, **trace context must be propagated** in messages.

### What's in the Context (32 bytes)

| Field         | Size       | Description                                             |
| ------------- | ---------- | ------------------------------------------------------- |
| `trace_id`    | 16 bytes   | Identifies the entire trace (constant across all nodes) |
| `span_id`     | 8 bytes    | The sender's current span (becomes parent on receiver)  |
| `trace_flags` | 4 bytes    | Sampling decision flags                                 |
| `trace_state` | ~0-4 bytes | Optional vendor-specific data                           |

### How span_id Changes at Each Hop

Only **one** `span_id` travels in the context - the sender's current span. Each node:
1. Extracts the received `span_id` and uses it as the `parent_span_id`
2. Creates a **new** `span_id` for its own span
3. Sends its own `span_id` as the parent when forwarding (see the sketch after this diagram)

```
Node A                      Node B                      Node C
──────                      ──────                      ──────

Span AAA                    Span BBB                    Span CCC
   │                           │                           │
   ▼                           ▼                           ▼
Context out:                Context out:                Context out:
├─ trace_id: abc123         ├─ trace_id: abc123         ├─ trace_id: abc123
├─ span_id: AAA ──────────► ├─ span_id: BBB ──────────► ├─ span_id: CCC ──────►
└─ flags: 01                └─ flags: 01                └─ flags: 01
                               │                           │
                               parent = AAA                parent = BBB
```

The `trace_id` stays constant, but `span_id` **changes at every hop** to maintain the parent-child chain.

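A self-contained sketch of this hop-by-hop rule (plain C++, not tied to any tracing library; all type and function names here are hypothetical):

```cpp
#include <cstdint>
#include <random>
#include <string>

// trace_id is copied unchanged; span_id is regenerated at every node.
struct TraceContext
{
    std::string trace_id;   // constant for the whole trace
    std::uint64_t span_id;  // the *sender's* current span
    std::uint8_t flags;     // sampling decision
};

struct Span
{
    std::string trace_id;
    std::uint64_t span_id;
    std::uint64_t parent_span_id;
};

std::uint64_t newSpanId()
{
    static std::mt19937_64 rng{std::random_device{}()};
    return rng();
}

// On receipt: the incoming span_id becomes this node's parent_span_id.
Span startChildSpan(TraceContext const& incoming)
{
    return Span{incoming.trace_id, newSpanId(), incoming.span_id};
}

// On relay: forward our own span_id, never the one we received.
TraceContext contextToForward(Span const& current)
{
    return TraceContext{current.trace_id, current.span_id, /*flags=*/0x01};
}
```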
### Propagation Formats

There are two patterns:

#### HTTP/RPC Headers (W3C Trace Context)

```
traceparent: 00-abc123def456-span789-01
             │  │            │       │
             │  │            │       └── Flags (sampled)
             │  │            └── Parent span ID
             │  └── Trace ID
             └── Version
```
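For illustration, a minimal sketch of splitting that header into its four dash-separated fields (a hypothetical helper; real W3C trace IDs are fixed-width lowercase hex rather than the shortened values shown above):

```cpp
#include <optional>
#include <sstream>
#include <string>
#include <vector>

struct TraceParent
{
    std::string version, trace_id, parent_id, flags;
};

// Split "version-traceid-parentid-flags" on '-' and reject anything
// that does not have exactly four fields.
std::optional<TraceParent> parseTraceParent(std::string const& header)
{
    std::vector<std::string> parts;
    std::stringstream ss(header);
    for (std::string field; std::getline(ss, field, '-');)
        parts.push_back(field);
    if (parts.size() != 4)
        return std::nullopt;
    return TraceParent{parts[0], parts[1], parts[2], parts[3]};
}
// parseTraceParent("00-abc123def456-span789-01") yields the pieces
// labeled in the diagram above.
```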
#### Protocol Buffers (rippled P2P messages)

```protobuf
message TMTransaction {
    bytes rawTransaction = 1;
    // ... existing fields ...

    // Trace context extension
    bytes trace_parent = 100;  // W3C traceparent
    bytes trace_state = 101;   // W3C tracestate
}
```

---

## Sampling

Not every trace needs to be recorded. **Sampling** reduces overhead:

### Head Sampling (at trace start)
```
Request arrives → Random 10% chance → Record or skip entire trace
```
- ✅ Low overhead
- ❌ May miss interesting traces

### Tail Sampling (after trace completes)
```
Trace completes → Collector evaluates:
  - Error?  → KEEP
  - Slow?   → KEEP
  - Normal? → Sample 10%
```
- ✅ Never loses important traces
- ❌ Higher memory usage at collector
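A sketch of the head-sampling rule under the 10% example above (the helper name is hypothetical); the decision is made once at trace start and every child span inherits it via the sampled flag:

```cpp
#include <random>

// Decide once, when the trace starts, whether to record it. All child
// spans inherit this decision, so a trace is kept or dropped as a whole.
bool headSample(double ratio)  // e.g. 0.10 to keep 10% of traces
{
    static thread_local std::mt19937 rng{std::random_device{}()};
    return std::bernoulli_distribution{ratio}(rng);
}
```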
---

## Key Benefits for rippled

| Challenge                          | How Tracing Helps                         |
| ---------------------------------- | ----------------------------------------- |
| "Where is my transaction?"         | Follow trace across all nodes it touched  |
| "Why was consensus slow?"          | See timing breakdown of each phase        |
| "Which node is the bottleneck?"    | Compare span durations across nodes       |
| "What happened during the outage?" | Correlate errors across the network       |

---

## Glossary

| Term                | Definition                                                       |
| ------------------- | ---------------------------------------------------------------- |
| **Trace**           | Complete journey of a request, identified by `trace_id`          |
| **Span**            | Single operation within a trace                                  |
| **Context**         | Data propagated between services (`trace_id`, `span_id`, flags)  |
| **Instrumentation** | Code that creates spans and propagates context                   |
| **Collector**       | Service that receives, processes, and exports traces             |
| **Backend**         | Storage/visualization system (Jaeger, Tempo, etc.)               |
| **Head Sampling**   | Sampling decision at trace start                                 |
| **Tail Sampling**   | Sampling decision after trace completes                          |

---

*Next: [Architecture Analysis](./01-architecture-analysis.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
OpenTelemetryPlan/01-architecture-analysis.md (new file, 328 lines)
@@ -0,0 +1,328 @@
|
||||
# Architecture Analysis

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Design Decisions](./02-design-decisions.md) | [Implementation Strategy](./03-implementation-strategy.md)

---

## 1.1 Current rippled Architecture Overview

The rippled node software consists of several interconnected components that need instrumentation for distributed tracing:

```mermaid
flowchart TB
    subgraph rippled["rippled Node"]
        subgraph services["Core Services"]
            RPC["RPC Server<br/>(HTTP/WS/gRPC)"]
            Overlay["Overlay<br/>(P2P Network)"]
            Consensus["Consensus<br/>(RCLConsensus)"]
        end

        JobQueue["JobQueue<br/>(Thread Pool)"]

        subgraph processing["Processing Layer"]
            NetworkOPs["NetworkOPs<br/>(Tx Processing)"]
            LedgerMaster["LedgerMaster<br/>(Ledger Mgmt)"]
            NodeStore["NodeStore<br/>(Database)"]
        end

        subgraph observability["Existing Observability"]
            PerfLog["PerfLog<br/>(JSON)"]
            Insight["Insight<br/>(StatsD)"]
            Logging["Logging<br/>(Journal)"]
        end

        services --> JobQueue
        JobQueue --> processing
    end

    style rippled fill:#424242,stroke:#212121,color:#ffffff
    style services fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style processing fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style observability fill:#e65100,stroke:#bf360c,color:#ffffff
```
---

## 1.2 Key Components for Instrumentation

| Component         | Location                                   | Purpose                  | Trace Value                  |
| ----------------- | ------------------------------------------ | ------------------------ | ---------------------------- |
| **Overlay**       | `src/xrpld/overlay/`                       | P2P communication        | Message propagation timing   |
| **PeerImp**       | `src/xrpld/overlay/detail/PeerImp.cpp`     | Individual peer handling | Per-peer latency             |
| **RCLConsensus**  | `src/xrpld/app/consensus/RCLConsensus.cpp` | Consensus algorithm      | Round timing, phase analysis |
| **NetworkOPs**    | `src/xrpld/app/misc/NetworkOPs.cpp`        | Transaction processing   | Tx lifecycle tracking        |
| **ServerHandler** | `src/xrpld/rpc/detail/ServerHandler.cpp`   | RPC entry point          | Request latency              |
| **RPCHandler**    | `src/xrpld/rpc/detail/RPCHandler.cpp`      | Command execution        | Per-command timing           |
| **JobQueue**      | `src/xrpl/core/JobQueue.h`                 | Async task execution     | Queue wait times             |
---

## 1.3 Transaction Flow Diagram

Transaction flow spans multiple nodes in the network. Each node creates linked spans to form a distributed trace:

```mermaid
sequenceDiagram
    participant Client
    participant PeerA as Peer A (Receive)
    participant PeerB as Peer B (Relay)
    participant PeerC as Peer C (Validate)

    Client->>PeerA: 1. Submit TX

    rect rgb(230, 245, 255)
        Note over PeerA: tx.receive SPAN START
        PeerA->>PeerA: HashRouter Deduplication
        PeerA->>PeerA: tx.validate (child span)
    end

    PeerA->>PeerB: 2. Relay TX (with trace ctx)

    rect rgb(230, 245, 255)
        Note over PeerB: tx.receive (linked span)
    end

    PeerB->>PeerC: 3. Relay TX

    rect rgb(230, 245, 255)
        Note over PeerC: tx.receive (linked span)
        PeerC->>PeerC: tx.process
    end

    Note over Client,PeerC: DISTRIBUTED TRACE (same trace_id: abc123)
```
### Trace Structure

```
trace_id: abc123
├── span: tx.receive (Peer A)
│   ├── span: tx.validate
│   └── span: tx.relay
├── span: tx.receive (Peer B)  [parent: Peer A]
│   └── span: tx.relay
└── span: tx.receive (Peer C)  [parent: Peer B]
    └── span: tx.process
```
---

## 1.4 Consensus Round Flow

Consensus rounds are multi-phase operations that benefit significantly from tracing:

```mermaid
flowchart TB
    subgraph round["consensus.round (root span)"]
        attrs["Attributes:<br/>xrpl.consensus.ledger.seq = 12345678<br/>xrpl.consensus.mode = proposing<br/>xrpl.consensus.proposers = 35"]

        subgraph open["consensus.phase.open"]
            open_desc["Duration: ~3s<br/>Waiting for transactions"]
        end

        subgraph establish["consensus.phase.establish"]
            est_attrs["proposals_received = 28<br/>disputes_resolved = 3"]
            est_children["├── consensus.proposal.receive (×28)<br/>├── consensus.proposal.send (×1)<br/>└── consensus.dispute.resolve (×3)"]
        end

        subgraph accept["consensus.phase.accept"]
            acc_attrs["transactions_applied = 150<br/>ledger.hash = DEF456..."]
            acc_children["├── ledger.build<br/>└── ledger.validate"]
        end

        attrs --> open
        open --> establish
        establish --> accept
    end

    style round fill:#f57f17,stroke:#e65100,color:#ffffff
    style open fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style establish fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style accept fill:#c2185b,stroke:#880e4f,color:#ffffff
```
---

## 1.5 RPC Request Flow

RPC requests support W3C Trace Context headers for distributed tracing across services:

```mermaid
flowchart TB
    subgraph request["rpc.request (root span)"]
        http["HTTP Request<br/>POST /<br/>traceparent: 00-abc123...-def456...-01"]

        attrs["Attributes:<br/>http.method = POST<br/>net.peer.ip = 192.168.1.100<br/>xrpl.rpc.command = submit"]

        subgraph enqueue["jobqueue.enqueue"]
            job_attr["xrpl.job.type = jtCLIENT_RPC"]
        end

        subgraph command["rpc.command.submit"]
            cmd_attrs["xrpl.rpc.version = 2<br/>xrpl.rpc.role = user"]
            cmd_children["├── tx.deserialize<br/>├── tx.validate_local<br/>└── tx.submit_to_network"]
        end

        response["Response: 200 OK<br/>Duration: 45ms"]

        http --> attrs
        attrs --> enqueue
        enqueue --> command
        command --> response
    end

    style request fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style enqueue fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style command fill:#e65100,stroke:#bf360c,color:#ffffff
```
---

## 1.6 Key Trace Points

The following table identifies priority instrumentation points across the codebase:

| Category        | Span Name              | File                 | Method                 | Priority |
| --------------- | ---------------------- | -------------------- | ---------------------- | -------- |
| **Transaction** | `tx.receive`           | `PeerImp.cpp`        | `handleTransaction()`  | High     |
| **Transaction** | `tx.validate`          | `NetworkOPs.cpp`     | `processTransaction()` | High     |
| **Transaction** | `tx.process`           | `NetworkOPs.cpp`     | `doTransactionSync()`  | High     |
| **Transaction** | `tx.relay`             | `OverlayImpl.cpp`    | `relay()`              | Medium   |
| **Consensus**   | `consensus.round`      | `RCLConsensus.cpp`   | `startRound()`         | High     |
| **Consensus**   | `consensus.phase.*`    | `Consensus.h`        | `timerEntry()`         | High     |
| **Consensus**   | `consensus.proposal.*` | `RCLConsensus.cpp`   | `peerProposal()`       | Medium   |
| **RPC**         | `rpc.request`          | `ServerHandler.cpp`  | `onRequest()`          | High     |
| **RPC**         | `rpc.command.*`        | `RPCHandler.cpp`     | `doCommand()`          | High     |
| **Peer**        | `peer.connect`         | `OverlayImpl.cpp`    | `onHandoff()`          | Low      |
| **Peer**        | `peer.message.*`       | `PeerImp.cpp`        | `onMessage()`          | Low      |
| **Ledger**      | `ledger.acquire`       | `InboundLedgers.cpp` | `acquire()`            | Medium   |
| **Ledger**      | `ledger.build`         | `RCLConsensus.cpp`   | `buildLCL()`           | High     |
---

## 1.7 Instrumentation Priority

```mermaid
quadrantChart
    title Instrumentation Priority Matrix
    x-axis Low Complexity --> High Complexity
    y-axis Low Value --> High Value
    quadrant-1 Implement First
    quadrant-2 Plan Carefully
    quadrant-3 Quick Wins
    quadrant-4 Consider Later

    RPC Tracing: [0.3, 0.85]
    Transaction Tracing: [0.65, 0.92]
    Consensus Tracing: [0.75, 0.87]
    Peer Message Tracing: [0.4, 0.3]
    Ledger Acquisition: [0.5, 0.6]
    JobQueue Tracing: [0.35, 0.5]
```
---

## 1.8 Observable Outcomes

After implementing OpenTelemetry, operators and developers will gain visibility into the following:

### 1.8.1 What You Will See: Traces

| Trace Type                 | Description                                                                                  | Example Query in Grafana/Tempo                         |
| -------------------------- | -------------------------------------------------------------------------------------------- | ------------------------------------------------------ |
| **Transaction Lifecycle**  | Full journey from RPC submission through validation, relay, consensus, and ledger inclusion | `{service.name="rippled" && xrpl.tx.hash="ABC123..."}` |
| **Cross-Node Propagation** | Transaction path across multiple rippled nodes with timing                                  | `{xrpl.tx.relay_count > 0}`                            |
| **Consensus Rounds**       | Complete round with all phases (open, establish, accept)                                    | `{span.name=~"consensus.round.*"}`                     |
| **RPC Request Processing** | Individual command execution with timing breakdown                                          | `{xrpl.rpc.command="account_info"}`                    |
| **Ledger Acquisition**     | Peer-to-peer ledger data requests and responses                                             | `{span.name="ledger.acquire"}`                         |
### 1.8.2 What You Will See: Metrics (Derived from Traces)

| Metric                        | Description                            | Dashboard Panel             |
| ----------------------------- | -------------------------------------- | --------------------------- |
| **RPC Latency (p50/p95/p99)** | Response time distribution per command | Heatmap by command          |
| **Transaction Throughput**    | Transactions processed per second      | Time series graph           |
| **Consensus Round Duration**  | Time to complete consensus phases      | Histogram                   |
| **Cross-Node Latency**        | Time for transaction to reach N nodes  | Line chart with percentiles |
| **Error Rate**                | Failed transactions/RPC calls by type  | Stacked bar chart           |
### 1.8.3 Concrete Dashboard Examples

**Transaction Trace View (Jaeger/Tempo):**
```
┌────────────────────────────────────────────────────────────────────┐
│ Trace: abc123... (Transaction Submission)          Duration: 847ms │
├────────────────────────────────────────────────────────────────────┤
│ ├── rpc.request [ServerHandler]                   ████░░░░░░  45ms │
│ │   └── rpc.command.submit [RPCHandler]           ████░░░░░░  42ms │
│ │       └── tx.receive [NetworkOPs]               ███░░░░░░░  35ms │
│ │           ├── tx.validate [TxQ]                 █░░░░░░░░░   8ms │
│ │           └── tx.relay [Overlay]                ██░░░░░░░░  15ms │
│ │               ├── tx.receive [Node-B]           █████░░░░░  52ms │
│ │               │   └── tx.relay [Node-B]         ██░░░░░░░░  18ms │
│ │               └── tx.receive [Node-C]           ██████░░░░  65ms │
│ └── consensus.round [RCLConsensus]                ████████░░ 720ms │
│     ├── consensus.phase.open                      ██░░░░░░░░ 180ms │
│     ├── consensus.phase.establish                 █████░░░░░ 480ms │
│     └── consensus.phase.accept                    █░░░░░░░░░  60ms │
└────────────────────────────────────────────────────────────────────┘
```

**RPC Performance Dashboard Panel:**
```
┌─────────────────────────────────────────────────────────────┐
│ RPC Command Latency (Last 1 Hour)                           │
├─────────────────────────────────────────────────────────────┤
│ Command          │  p50   │  p95   │  p99   │ Errors │ Rate │
│──────────────────┼────────┼────────┼────────┼────────┼──────│
│ account_info     │  12ms  │  45ms  │  89ms  │  0.1%  │ 150/s│
│ submit           │  35ms  │ 120ms  │ 250ms  │  2.3%  │  45/s│
│ ledger           │   8ms  │  25ms  │  55ms  │  0.0%  │  80/s│
│ tx               │  15ms  │  50ms  │ 100ms  │  0.5%  │  60/s│
│ server_info      │   5ms  │  12ms  │  20ms  │  0.0%  │ 200/s│
└─────────────────────────────────────────────────────────────┘
```
**Consensus Health Dashboard Panel:**

```mermaid
---
config:
  xyChart:
    width: 1200
    height: 400
    plotReservedSpacePercent: 50
    chartOrientation: vertical
  themeVariables:
    xyChart:
      plotColorPalette: "#3498db"
---
xychart-beta
    title "Consensus Round Duration (Last 24 Hours)"
    x-axis "Time of Day (Hours)" [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
    y-axis "Duration (seconds)" 1 --> 5
    line [2.1, 2.8, 3.5, 4.0, 4.5, 4.8, 5.0, 4.7, 4.2, 3.4, 2.9, 2.4, 2.0]
```
### 1.8.4 Operator Actionable Insights

| Scenario              | What You'll See                                                               | Action                           |
| --------------------- | ----------------------------------------------------------------------------- | -------------------------------- |
| **Slow RPC**          | Span showing which phase is slow (parsing, execution, serialization)          | Optimize specific code path      |
| **Transaction Stuck** | Trace stops at validation; error attribute shows reason                       | Fix transaction parameters       |
| **Consensus Delay**   | Phase.establish taking too long; proposer attribute shows missing validators  | Investigate network connectivity |
| **Memory Spike**      | Large batch of spans correlating with memory increase                         | Tune batch_size or sampling      |
| **Network Partition** | Traces missing cross-node links for specific peer                             | Check peer connectivity          |
### 1.8.5 Developer Debugging Workflow

1. **Find Transaction**: Query by `xrpl.tx.hash` to get full trace
2. **Identify Bottleneck**: Look at span durations to find slowest component
3. **Check Attributes**: Review `xrpl.tx.validity`, `xrpl.rpc.status` for errors
4. **Correlate Logs**: Use `trace_id` to find related PerfLog entries
5. **Compare Nodes**: Filter by `service.instance.id` to compare behavior across nodes

---

*Next: [Design Decisions](./02-design-decisions.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
`OpenTelemetryPlan/02-design-decisions.md` (new file, 485 lines)
@@ -0,0 +1,485 @@
# Design Decisions

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Architecture Analysis](./01-architecture-analysis.md) | [Code Samples](./04-code-samples.md)

---

## 2.1 OpenTelemetry Components

### 2.1.1 SDK Selection

**Primary Choice**: OpenTelemetry C++ SDK (`opentelemetry-cpp`)

| Component                               | Purpose                | Required    |
| --------------------------------------- | ---------------------- | ----------- |
| `opentelemetry-cpp::api`                 | Tracing API headers    | Yes         |
| `opentelemetry-cpp::sdk`                 | SDK implementation     | Yes         |
| `opentelemetry-cpp::ext`                 | Extensions (exporters) | Yes         |
| `opentelemetry-cpp::otlp_grpc_exporter`  | OTLP/gRPC export       | Recommended |
| `opentelemetry-cpp::otlp_http_exporter`  | OTLP/HTTP export       | Alternative |

### 2.1.2 Instrumentation Strategy

**Manual Instrumentation** (recommended):

| Approach   | Pros                                                              | Cons                                                     |
| ---------- | ----------------------------------------------------------------- | -------------------------------------------------------- |
| **Manual** | Precise control, optimized placement, rippled-specific attributes | More development effort                                  |
| **Auto**   | Less code, automatic coverage                                     | Less control, potential overhead, limited customization  |
---

## 2.2 Exporter Configuration

```mermaid
flowchart TB
    subgraph nodes["rippled Nodes"]
        node1["rippled<br/>Node 1"]
        node2["rippled<br/>Node 2"]
        node3["rippled<br/>Node 3"]
    end

    collector["OpenTelemetry<br/>Collector<br/>(sidecar or standalone)"]

    subgraph backends["Observability Backends"]
        jaeger["Jaeger<br/>(Dev)"]
        tempo["Tempo<br/>(Prod)"]
        elastic["Elastic<br/>APM"]
    end

    node1 -->|"OTLP/gRPC<br/>:4317"| collector
    node2 -->|"OTLP/gRPC<br/>:4317"| collector
    node3 -->|"OTLP/gRPC<br/>:4317"| collector

    collector --> jaeger
    collector --> tempo
    collector --> elastic

    style nodes fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style backends fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style collector fill:#bf360c,stroke:#8c2809,color:#ffffff
```
### 2.2.1 OTLP/gRPC (Recommended)

```cpp
// Configuration for OTLP over gRPC
namespace otlp = opentelemetry::exporter::otlp;

otlp::OtlpGrpcExporterOptions opts;
opts.endpoint = "localhost:4317";
opts.use_ssl_credentials = true;
opts.ssl_credentials_cacert_path = "/path/to/ca.crt";
```

### 2.2.2 OTLP/HTTP (Alternative)

```cpp
// Configuration for OTLP over HTTP
namespace otlp = opentelemetry::exporter::otlp;

otlp::OtlpHttpExporterOptions opts;
opts.url = "http://localhost:4318/v1/traces";
opts.content_type = otlp::HttpRequestContentType::kJson; // or kBinary
```
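The exporter options above plug into a `TracerProvider` at startup. A hedged sketch using the SDK's factory helpers (factory and option names follow recent `opentelemetry-cpp` releases; verify against the pinned SDK version):

```cpp
// Hedged sketch: wire exporter -> batch processor -> tracer provider.
#include <opentelemetry/exporters/otlp/otlp_grpc_exporter_factory.h>
#include <opentelemetry/sdk/trace/batch_span_processor_factory.h>
#include <opentelemetry/sdk/trace/tracer_provider_factory.h>
#include <opentelemetry/trace/provider.h>

#include <chrono>
#include <memory>

namespace otlp = opentelemetry::exporter::otlp;
namespace sdktrace = opentelemetry::sdk::trace;

void initTracing(otlp::OtlpGrpcExporterOptions const& opts)
{
    auto exporter = otlp::OtlpGrpcExporterFactory::Create(opts);

    // Batch settings matching the sizing discussed in the
    // implementation-strategy document.
    sdktrace::BatchSpanProcessorOptions bopts;
    bopts.max_queue_size = 2048;
    bopts.max_export_batch_size = 512;
    bopts.schedule_delay_millis = std::chrono::milliseconds(5000);

    auto processor =
        sdktrace::BatchSpanProcessorFactory::Create(std::move(exporter), bopts);

    std::shared_ptr<opentelemetry::trace::TracerProvider> provider =
        sdktrace::TracerProviderFactory::Create(std::move(processor));

    // Install as the global provider used by getTracer().
    opentelemetry::trace::Provider::SetTracerProvider(
        opentelemetry::nostd::shared_ptr<opentelemetry::trace::TracerProvider>(
            provider));
}
```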
---

## 2.3 Span Naming Conventions

### 2.3.1 Naming Schema

```
<component>.<operation>[.<sub-operation>]
```

**Examples**:
- `tx.receive` - Transaction received from peer
- `consensus.phase.establish` - Consensus establish phase
- `rpc.command.server_info` - server_info RPC command
### 2.3.2 Complete Span Catalog

```yaml
# Transaction Spans
tx:
  receive: "Transaction received from network"
  validate: "Transaction signature/format validation"
  process: "Full transaction processing"
  relay: "Transaction relay to peers"
  apply: "Apply transaction to ledger"

# Consensus Spans
consensus:
  round: "Complete consensus round"
  phase:
    open: "Open phase - collecting transactions"
    establish: "Establish phase - reaching agreement"
    accept: "Accept phase - applying consensus"
  proposal:
    receive: "Receive peer proposal"
    send: "Send our proposal"
  validation:
    receive: "Receive peer validation"
    send: "Send our validation"

# RPC Spans
rpc:
  request: "HTTP/WebSocket request handling"
  command:
    "*": "Specific RPC command (dynamic)"

# Peer Spans
peer:
  connect: "Peer connection establishment"
  disconnect: "Peer disconnection"
  message:
    send: "Send protocol message"
    receive: "Receive protocol message"

# Ledger Spans
ledger:
  acquire: "Ledger acquisition from network"
  build: "Build new ledger"
  validate: "Ledger validation"
  close: "Close ledger"

# Job Spans
job:
  enqueue: "Job added to queue"
  execute: "Job execution"
```
---

## 2.4 Attribute Schema

### 2.4.1 Resource Attributes (Set Once at Startup)

```cpp
// Standard OpenTelemetry semantic conventions
resource::SemanticConventions::SERVICE_NAME = "rippled"
resource::SemanticConventions::SERVICE_VERSION = BuildInfo::getVersionString()
resource::SemanticConventions::SERVICE_INSTANCE_ID = <node_public_key_base58>

// Custom rippled attributes
"xrpl.network.id" = <network_id>    // e.g., 0 for mainnet
"xrpl.network.type" = "mainnet" | "testnet" | "devnet" | "standalone"
"xrpl.node.type" = "validator" | "stock" | "reporting"
"xrpl.node.cluster" = <cluster_name>    // If clustered
```
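A hedged sketch of how this resource could be built with the C++ SDK (`Resource::Create` is the SDK call; the parameters are illustrative stand-ins for values rippled already has at startup):

```cpp
// Hedged sketch: building the startup resource attributes.
#include <opentelemetry/sdk/resource/resource.h>

#include <cstdint>
#include <string>

auto makeResource(
    std::string const& version,        // e.g. BuildInfo::getVersionString()
    std::string const& nodePublicKey,  // base58 node public key
    std::uint32_t networkId)
{
    // Plain string keys are used here; semantic-convention constants
    // vary by SDK version.
    return opentelemetry::sdk::resource::Resource::Create({
        {"service.name", "rippled"},
        {"service.version", version},
        {"service.instance.id", nodePublicKey},
        {"xrpl.network.id", static_cast<int32_t>(networkId)},
        {"xrpl.network.type", "mainnet"},
        {"xrpl.node.type", "validator"},
    });
}
```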
### 2.4.2 Span Attributes by Category

#### Transaction Attributes
```cpp
"xrpl.tx.hash" = string           // Transaction hash (hex)
"xrpl.tx.type" = string           // "Payment", "OfferCreate", etc.
"xrpl.tx.account" = string        // Source account (redacted in prod)
"xrpl.tx.sequence" = int64        // Account sequence number
"xrpl.tx.fee" = int64             // Fee in drops
"xrpl.tx.result" = string         // "tesSUCCESS", "tecPATH_DRY", etc.
"xrpl.tx.ledger_index" = int64    // Ledger containing transaction
```

#### Consensus Attributes
```cpp
"xrpl.consensus.round" = int64          // Round number
"xrpl.consensus.phase" = string         // "open", "establish", "accept"
"xrpl.consensus.mode" = string          // "proposing", "observing", etc.
"xrpl.consensus.proposers" = int64      // Number of proposers
"xrpl.consensus.ledger.prev" = string   // Previous ledger hash
"xrpl.consensus.ledger.seq" = int64     // Ledger sequence
"xrpl.consensus.tx_count" = int64       // Transactions in consensus set
"xrpl.consensus.duration_ms" = float64  // Round duration
```

#### RPC Attributes
```cpp
"xrpl.rpc.command" = string   // Command name
"xrpl.rpc.version" = int64    // API version
"xrpl.rpc.role" = string      // "admin" or "user"
"xrpl.rpc.params" = string    // Sanitized parameters (optional)
```

#### Peer & Message Attributes
```cpp
"xrpl.peer.id" = string               // Peer public key (base58)
"xrpl.peer.address" = string          // IP:port
"xrpl.peer.latency_ms" = float64      // Measured latency
"xrpl.peer.cluster" = string          // Cluster name if clustered
"xrpl.message.type" = string          // Protocol message type name
"xrpl.message.size_bytes" = int64     // Message size
"xrpl.message.compressed" = bool      // Whether compressed
```

#### Ledger & Job Attributes
```cpp
"xrpl.ledger.hash" = string        // Ledger hash
"xrpl.ledger.index" = int64        // Ledger sequence/index
"xrpl.ledger.close_time" = int64   // Close time (epoch)
"xrpl.ledger.tx_count" = int64     // Transaction count
"xrpl.job.type" = string           // Job type name
"xrpl.job.queue_ms" = float64      // Time spent in queue
"xrpl.job.worker" = int64          // Worker thread ID
```
### 2.4.3 Data Collection Summary

The following table summarizes what data is collected by category:

| Category        | Attributes Collected                                                  | Purpose                     |
| --------------- | --------------------------------------------------------------------- | --------------------------- |
| **Transaction** | `tx.hash`, `tx.type`, `tx.result`, `tx.fee`, `ledger_index`           | Trace transaction lifecycle |
| **Consensus**   | `round`, `phase`, `mode`, `proposers` (public keys), `duration_ms`    | Analyze consensus timing    |
| **RPC**         | `command`, `version`, `status`, `duration_ms`                         | Monitor RPC performance     |
| **Peer**        | `peer.id` (public key), `latency_ms`, `message.type`, `message.size`  | Network topology analysis   |
| **Ledger**      | `ledger.hash`, `ledger.index`, `close_time`, `tx_count`               | Ledger progression tracking |
| **Job**         | `job.type`, `queue_ms`, `worker`                                      | JobQueue performance        |
### 2.4.4 Privacy & Sensitive Data Policy

OpenTelemetry instrumentation is designed to collect **operational metadata only**, never sensitive content.

#### Data NOT Collected

The following data is explicitly **excluded** from telemetry collection:

| Excluded Data           | Reason                                    |
| ----------------------- | ----------------------------------------- |
| **Private Keys**        | Never exposed; not relevant to tracing    |
| **Account Balances**    | Financial data; privacy sensitive         |
| **Transaction Amounts** | Financial data; privacy sensitive         |
| **Raw TX Payloads**     | May contain sensitive memo/data fields    |
| **Personal Data**       | No PII collected                          |
| **IP Addresses**        | Configurable; excluded by default in prod |

#### Privacy Protection Mechanisms

| Mechanism                     | Description                                                                |
| ----------------------------- | --------------------------------------------------------------------------- |
| **Account Hashing**           | `xrpl.tx.account` is hashed at collector level before storage              |
| **Configurable Redaction**    | Sensitive fields can be excluded via `[telemetry]` config section          |
| **Sampling**                  | Only 10% of traces recorded by default, reducing data exposure             |
| **Local Control**             | Node operators have full control over what gets exported                   |
| **No Raw Payloads**           | Transaction content is never recorded, only metadata (hash, type, result)  |
| **Collector-Level Filtering** | Additional redaction/hashing can be configured at OTel Collector           |

#### Collector-Level Data Protection

The OpenTelemetry Collector can be configured to hash or redact sensitive attributes before export:

```yaml
processors:
  attributes:
    actions:
      # Hash account addresses before storage
      - key: xrpl.tx.account
        action: hash
      # Remove IP addresses entirely
      - key: xrpl.peer.address
        action: delete
      # Redact specific fields
      - key: xrpl.rpc.params
        action: delete
```

#### Configuration Options for Privacy

In `rippled.cfg`, operators can control data collection granularity:

```ini
[telemetry]
enabled=1

# Disable collection of specific components
trace_transactions=1
trace_consensus=1
trace_rpc=1
trace_peer=0           # Disable peer tracing (high volume, includes addresses)

# Redact specific attributes
redact_account=1       # Hash account addresses before export
redact_peer_address=1  # Remove peer IP addresses
```

> **Key Principle**: Telemetry collects **operational metadata** (timing, counts, hashes) — never **sensitive content** (keys, balances, amounts, raw payloads).
---

## 2.5 Context Propagation Design

### 2.5.1 Propagation Boundaries

```mermaid
flowchart TB
    subgraph http["HTTP/WebSocket (RPC)"]
        w3c["W3C Trace Context Headers:<br/>traceparent: 00-{trace_id}-{span_id}-{flags}<br/>tracestate: rippled=<state>"]
    end

    subgraph protobuf["Protocol Buffers (P2P)"]
        proto["message TraceContext {<br/>  bytes trace_id = 1;  // 16 bytes<br/>  bytes span_id = 2;   // 8 bytes<br/>  uint32 trace_flags = 3;<br/>  string trace_state = 4;<br/>}"]
    end

    subgraph jobqueue["JobQueue (Internal Async)"]
        job["Context captured at job creation,<br/>restored at execution<br/><br/>class Job {<br/>  opentelemetry::context::Context traceContext_;<br/>};"]
    end

    style http fill:#0d47a1,stroke:#082f6a,color:#ffffff
    style protobuf fill:#1b5e20,stroke:#0d3d14,color:#ffffff
    style jobqueue fill:#bf360c,stroke:#8c2809,color:#ffffff
```
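A hedged sketch of the JobQueue boundary above: capture the current context when the job is created, reattach it when the job runs on a worker thread (the `Job` shape here is a simplified illustration, not the actual rippled class):

```cpp
// Hedged sketch: propagating trace context across the async JobQueue boundary.
#include <opentelemetry/context/runtime_context.h>

#include <functional>
#include <utility>

class Job
{
    std::function<void()> work_;
    opentelemetry::context::Context traceContext_;

public:
    explicit Job(std::function<void()> work)
        : work_(std::move(work))
        // Capture the submitter's context at enqueue time.
        , traceContext_(opentelemetry::context::RuntimeContext::GetCurrent())
    {
    }

    void run()
    {
        // Reattach the captured context on the worker thread; the token
        // detaches it again when this scope ends.
        auto token =
            opentelemetry::context::RuntimeContext::Attach(traceContext_);
        work_();
    }
};
```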
---

## 2.6 Integration with Existing Observability

### 2.6.1 Existing Frameworks Comparison

rippled already has two observability mechanisms. OpenTelemetry complements (not replaces) them:

| Aspect                | PerfLog                       | Beast Insight (StatsD)       | OpenTelemetry             |
| --------------------- | ----------------------------- | ---------------------------- | ------------------------- |
| **Type**              | Logging                       | Metrics                      | Distributed Tracing       |
| **Data**              | JSON log entries              | Counters, gauges, histograms | Spans with context        |
| **Scope**             | Single node                   | Single node                  | **Cross-node**            |
| **Output**            | `perf.log` file               | StatsD server                | OTLP Collector            |
| **Question answered** | "What happened on this node?" | "How many? How fast?"        | "What was the journey?"   |
| **Correlation**       | By timestamp                  | By metric name               | By `trace_id`             |
| **Overhead**          | Low (file I/O)                | Low (UDP packets)            | Low-Medium (configurable) |
### 2.6.2 What Each Framework Does Best

#### PerfLog
- **Purpose**: Detailed local event logging for RPC and job execution
- **Strengths**:
  - Rich JSON output with timing data
  - Already integrated in RPC handlers
  - File-based, no external dependencies
- **Limitations**:
  - Single-node only (no cross-node correlation)
  - No parent-child relationships between events
  - Manual log parsing required

```json
// Example PerfLog entry
{
  "time": "2024-01-15T10:30:00.123Z",
  "method": "submit",
  "duration_us": 1523,
  "result": "tesSUCCESS"
}
```

#### Beast Insight (StatsD)
- **Purpose**: Real-time metrics for monitoring dashboards
- **Strengths**:
  - Aggregated metrics (counters, gauges, histograms)
  - Low overhead (UDP, fire-and-forget)
  - Good for alerting thresholds
- **Limitations**:
  - No request-level detail
  - No causal relationships
  - Single-node perspective

```cpp
// Example StatsD usage in rippled
insight.increment("rpc.submit.count");
insight.gauge("ledger.age", age);
insight.timing("consensus.round", duration);
```

#### OpenTelemetry (NEW)
- **Purpose**: Distributed request tracing across nodes
- **Strengths**:
  - **Cross-node correlation** via `trace_id`
  - Parent-child span relationships
  - Rich attributes per span
  - Industry standard (CNCF)
- **Limitations**:
  - Requires collector infrastructure
  - Higher complexity than logging

```cpp
// Example OpenTelemetry span
auto span = telemetry.startSpan("tx.relay");
span->SetAttribute("tx.hash", hash);
span->SetAttribute("peer.id", peerId);
// Span automatically linked to parent via context
```
### 2.6.3 When to Use Each

| Scenario                                 | PerfLog    | StatsD | OpenTelemetry |
| ---------------------------------------- | ---------- | ------ | ------------- |
| "How many TXs per second?"               | ❌         | ✅     | ❌            |
| "What's the p99 RPC latency?"            | ❌         | ✅     | ✅            |
| "Why was this specific TX slow?"         | ⚠️ partial | ❌     | ✅            |
| "Which node delayed consensus?"          | ❌         | ❌     | ✅            |
| "What happened on node X at time T?"     | ✅         | ❌     | ✅            |
| "Show me the TX journey across 5 nodes"  | ❌         | ❌     | ✅            |
### 2.6.4 Coexistence Strategy

```mermaid
flowchart TB
    subgraph rippled["rippled Process"]
        perflog["PerfLog<br/>(JSON to file)"]
        insight["Beast Insight<br/>(StatsD)"]
        otel["OpenTelemetry<br/>(Tracing)"]
    end

    perflog --> perffile["perf.log"]
    insight --> statsd["StatsD Server"]
    otel --> collector["OTLP Collector"]

    perffile --> grafana["Grafana<br/>(Unified UI)"]
    statsd --> grafana
    collector --> grafana

    style rippled fill:#212121,stroke:#0a0a0a,color:#ffffff
    style grafana fill:#bf360c,stroke:#8c2809,color:#ffffff
```
### 2.6.5 Correlation with PerfLog

Trace IDs can be correlated with existing PerfLog entries for comprehensive debugging:

```cpp
// In RPCHandler.cpp - correlate trace with PerfLog
Status doCommand(RPC::JsonContext& context, Json::Value& result)
{
    // Start OpenTelemetry span
    auto span = context.app.getTelemetry().startSpan(
        "rpc.command." + context.method);

    // Get trace ID for correlation
    auto traceId = span->GetContext().trace_id().IsValid()
        ? toHex(span->GetContext().trace_id())
        : "";

    // Use existing PerfLog with trace correlation
    auto const curId = context.app.getPerfLog().currentId();
    context.app.getPerfLog().rpcStart(context.method, curId);

    // Future: Add trace ID to PerfLog entry
    // context.app.getPerfLog().setTraceId(curId, traceId);

    try {
        auto ret = handler(context, result);
        context.app.getPerfLog().rpcFinish(context.method, curId);
        span->SetStatus(opentelemetry::trace::StatusCode::kOk);
        return ret;
    } catch (std::exception const& e) {
        context.app.getPerfLog().rpcError(context.method, curId);
        span->RecordException(e);
        span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what());
        throw;
    }
}
```
---

*Previous: [Architecture Analysis](./01-architecture-analysis.md)* | *Next: [Implementation Strategy](./03-implementation-strategy.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
`OpenTelemetryPlan/03-implementation-strategy.md` (new file, 448 lines)
@@ -0,0 +1,448 @@
# Implementation Strategy

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Code Samples](./04-code-samples.md) | [Configuration Reference](./05-configuration-reference.md)

---

## 3.1 Directory Structure

The telemetry implementation follows rippled's existing code organization pattern:

```
include/xrpl/
├── telemetry/
│   ├── Telemetry.h             # Main telemetry interface
│   ├── TelemetryConfig.h       # Configuration structures
│   ├── TraceContext.h          # Context propagation utilities
│   ├── SpanGuard.h             # RAII span management
│   └── SpanAttributes.h        # Attribute helper functions

src/libxrpl/
├── telemetry/
│   ├── Telemetry.cpp           # Implementation
│   ├── TelemetryConfig.cpp     # Config parsing
│   ├── TraceContext.cpp        # Context serialization
│   └── NullTelemetry.cpp       # No-op implementation

src/xrpld/
├── telemetry/
│   ├── TracingInstrumentation.h    # Instrumentation macros
│   └── TracingInstrumentation.cpp
```
---

## 3.2 Implementation Approach

<div align="center">

```mermaid
%%{init: {'flowchart': {'nodeSpacing': 20, 'rankSpacing': 30}}}%%
flowchart TB
    subgraph phase1["Phase 1: Core"]
        direction LR
        sdk["SDK Integration"] ~~~ interface["Telemetry Interface"] ~~~ config["Configuration"]
    end

    subgraph phase2["Phase 2: RPC"]
        direction LR
        http["HTTP Context"] ~~~ rpc["RPC Handlers"]
    end

    subgraph phase3["Phase 3: P2P"]
        direction LR
        proto["Protobuf Context"] ~~~ tx["Transaction Relay"]
    end

    subgraph phase4["Phase 4: Consensus"]
        direction LR
        consensus["Consensus Rounds"] ~~~ proposals["Proposals"]
    end

    phase1 --> phase2 --> phase3 --> phase4

    style phase1 fill:#1565c0,stroke:#0d47a1,color:#ffffff
    style phase2 fill:#2e7d32,stroke:#1b5e20,color:#ffffff
    style phase3 fill:#e65100,stroke:#bf360c,color:#ffffff
    style phase4 fill:#c2185b,stroke:#880e4f,color:#ffffff
```

</div>
### Key Principles

1. **Minimal Intrusion**: Instrumentation should not alter existing control flow
2. **Zero-Cost When Disabled**: Use compile-time flags and no-op implementations (see the sketch after this list)
3. **Backward Compatibility**: Protocol Buffer extensions use high field numbers
4. **Graceful Degradation**: Tracing failures must not affect node operation
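A hedged sketch of principle 2. The macro name follows §3.7.3, and the guard variable `_xrpl_guard_` matches the usage shown in the consensus example of §3.9.7; the exact expansion is an assumption, not settled API:

```cpp
// Hedged sketch: the tracing macro compiles to a real span guard only
// when telemetry is built in; otherwise it is a no-op statement.
#if defined(XRPL_ENABLE_TELEMETRY)
#define XRPL_TRACE_SPAN(telemetry, name)                              \
    auto _xrpl_guard_ = std::make_unique<xrpl::telemetry::SpanGuard>( \
        (telemetry).startSpan(name))
#else
#define XRPL_TRACE_SPAN(telemetry, name) ((void)0)
#endif
```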
---

## 3.3 Performance Overhead Summary

| Metric        | Overhead   | Notes                               |
| ------------- | ---------- | ----------------------------------- |
| CPU           | 1-3%       | Span creation and attribute setting |
| Memory        | 2-5 MB     | Batch buffer for pending spans      |
| Network       | 10-50 KB/s | Compressed OTLP export to collector |
| Latency (p99) | <2%        | With proper sampling configuration  |
---

## 3.4 Detailed CPU Overhead Analysis

### 3.4.1 Per-Operation Costs

| Operation             | Time (ns) | Frequency              | Impact     |
| --------------------- | --------- | ---------------------- | ---------- |
| Span creation         | 200-500   | Every traced operation | Low        |
| Span end              | 100-200   | Every traced operation | Low        |
| SetAttribute (string) | 80-120    | 3-5 per span           | Low        |
| SetAttribute (int)    | 40-60     | 2-3 per span           | Negligible |
| AddEvent              | 50-80     | 0-2 per span           | Negligible |
| Context injection     | 150-250   | Per outgoing message   | Low        |
| Context extraction    | 100-180   | Per incoming message   | Low        |
| GetCurrent context    | 10-20     | Thread-local access    | Negligible |
### 3.4.2 Transaction Processing Overhead

<div align="center">

```mermaid
%%{init: {'pie': {'textPosition': 0.75}}}%%
pie showData
    "tx.receive (800ns)" : 800
    "tx.validate (500ns)" : 500
    "tx.relay (500ns)" : 500
    "Context inject (600ns)" : 600
```

**Transaction Tracing Overhead (~2.4μs total)**

</div>

**Overhead percentage**: 2.4 μs / 200 μs (avg tx processing) = **~1.2%**
### 3.4.3 Consensus Round Overhead

| Operation              | Count | Cost (ns) | Total      |
| ---------------------- | ----- | --------- | ---------- |
| consensus.round span   | 1     | ~1000     | ~1 μs      |
| consensus.phase spans  | 3     | ~700      | ~2.1 μs    |
| proposal.receive spans | ~20   | ~600      | ~12 μs     |
| proposal.send spans    | ~3    | ~600      | ~1.8 μs    |
| Context operations     | ~30   | ~200      | ~6 μs      |
| **TOTAL**              |       |           | **~23 μs** |

**Overhead percentage**: 23 μs / 3s (typical round) = **~0.0008%** (negligible)

### 3.4.4 RPC Request Overhead

| Operation        | Cost (ns)    |
| ---------------- | ------------ |
| rpc.request span | ~700         |
| rpc.command span | ~600         |
| Context extract  | ~250         |
| Context inject   | ~200         |
| **TOTAL**        | **~1.75 μs** |

- Fast RPC (1ms): 1.75 μs / 1ms = **~0.175%**
- Slow RPC (100ms): 1.75 μs / 100ms = **~0.002%**
---

## 3.5 Memory Overhead Analysis

### 3.5.1 Static Memory

| Component                | Size        | Allocated  |
| ------------------------ | ----------- | ---------- |
| TracerProvider singleton | ~64 KB      | At startup |
| BatchSpanProcessor       | ~128 KB     | At startup |
| OTLP exporter            | ~256 KB     | At startup |
| Propagator registry      | ~8 KB       | At startup |
| **Total static**         | **~456 KB** |            |
### 3.5.2 Dynamic Memory

| Component            | Size per unit | Max units  | Peak        |
| -------------------- | ------------- | ---------- | ----------- |
| Active span          | ~200 bytes    | 1000       | ~200 KB     |
| Queued span (export) | ~500 bytes    | 2048       | ~1 MB       |
| Attribute storage    | ~50 bytes     | 5 per span | Included    |
| Context storage      | ~64 bytes     | Per thread | ~6.4 KB     |
| **Total dynamic**    |               |            | **~1.2 MB** |
### 3.5.3 Memory Growth Characteristics

```mermaid
---
config:
  xyChart:
    width: 700
    height: 400
---
xychart-beta
    title "Memory Usage vs Span Rate"
    x-axis "Spans/second" [0, 200, 400, 600, 800, 1000]
    y-axis "Memory (MB)" 0 --> 6
    line [1, 1.8, 2.6, 3.4, 4.2, 5]
```

**Notes**:
- Memory increases linearly with span rate
- Batch export prevents unbounded growth
- Queue size is configurable (default 2048 spans)
- At queue limit, oldest spans are dropped (not blocked)
---

## 3.6 Network Overhead Analysis

### 3.6.1 Export Bandwidth

| Sampling Rate | Spans/sec | Bandwidth | Notes            |
| ------------- | --------- | --------- | ---------------- |
| 100%          | ~500      | ~250 KB/s | Development only |
| 10%           | ~50       | ~25 KB/s  | Staging          |
| 1%            | ~5        | ~2.5 KB/s | Production       |
| Error-only    | ~1        | ~0.5 KB/s | Minimal overhead |

### 3.6.2 Trace Context Propagation

| Message Type           | Context Size | Messages/sec | Overhead    |
| ---------------------- | ------------ | ------------ | ----------- |
| TMTransaction          | 32 bytes     | ~100         | ~3.2 KB/s   |
| TMProposeSet           | 32 bytes     | ~10          | ~320 B/s    |
| TMValidation           | 32 bytes     | ~50          | ~1.6 KB/s   |
| **Total P2P overhead** |              |              | **~5 KB/s** |
---

## 3.7 Optimization Strategies

### 3.7.1 Sampling Strategies

```mermaid
flowchart TD
    trace["New Trace"]

    trace --> errors{"Is Error?"}
    errors -->|Yes| sample["SAMPLE"]
    errors -->|No| consensus{"Is Consensus?"}

    consensus -->|Yes| sample
    consensus -->|No| slow{"Is Slow?"}

    slow -->|Yes| sample
    slow -->|No| prob{"Random < 10%?"}

    prob -->|Yes| sample
    prob -->|No| drop["DROP"]

    style sample fill:#4caf50,stroke:#388e3c,color:#fff
    style drop fill:#f44336,stroke:#c62828,color:#fff
```
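The same decision tree, written as plain logic. This is illustrative only: in practice the policy runs in the collector's tail-sampling stage, and `TraceSummary` is a hypothetical type, not an SDK one:

```cpp
// Hedged sketch of the sampling decision tree above.
struct TraceSummary
{
    bool hasError;      // any span ended with an error status
    bool isConsensus;   // trace rooted at consensus.round
    bool isSlow;        // duration exceeded the latency threshold
    double randomUnit;  // uniform random value in [0, 1)
};

bool shouldKeepTrace(TraceSummary const& t)
{
    if (t.hasError)    return true;  // errors are always kept
    if (t.isConsensus) return true;  // consensus rounds are always kept
    if (t.isSlow)      return true;  // slow traces are always kept
    return t.randomUnit < 0.10;      // sample 10% of the rest
}
```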
### 3.7.2 Batch Tuning Recommendations

| Environment        | Batch Size | Batch Delay | Max Queue |
| ------------------ | ---------- | ----------- | --------- |
| Low-latency        | 128        | 1000ms      | 512       |
| High-throughput    | 1024       | 10000ms     | 8192      |
| Memory-constrained | 256        | 2000ms      | 512       |
### 3.7.3 Conditional Instrumentation

```cpp
// Compile-time feature flag
#ifndef XRPL_ENABLE_TELEMETRY
// Zero-cost when disabled
#define XRPL_TRACE_SPAN(t, n) ((void)0)
#endif

// Runtime component filtering
if (telemetry.shouldTracePeer())
{
    XRPL_TRACE_SPAN(telemetry, "peer.message.receive");
    // ... instrumentation
}
// No overhead when component tracing disabled
```
---

## 3.8 Links to Detailed Documentation

- **[Code Samples](./04-code-samples.md)**: Complete implementation code for all components
- **[Configuration Reference](./05-configuration-reference.md)**: Configuration options and collector setup
- **[Implementation Phases](./06-implementation-phases.md)**: Detailed timeline and milestones

---

## 3.9 Code Intrusiveness Assessment

This section provides a detailed assessment of how intrusive the OpenTelemetry integration is to the existing rippled codebase.

### 3.9.1 Files Modified Summary

| Component             | Files Modified | Lines Added | Lines Changed | Architectural Impact |
| --------------------- | -------------- | ----------- | ------------- | -------------------- |
| **Core Telemetry**    | 5 new files    | ~800        | 0             | None (new module)    |
| **Application Init**  | 2 files        | ~30         | ~5            | Minimal              |
| **RPC Layer**         | 3 files        | ~80         | ~20           | Minimal              |
| **Transaction Relay** | 4 files        | ~120        | ~40           | Low                  |
| **Consensus**         | 3 files        | ~100        | ~30           | Low-Medium           |
| **Protocol Buffers**  | 1 file         | ~25         | 0             | Low                  |
| **CMake/Build**       | 3 files        | ~50         | ~10           | Minimal              |
| **Total**             | **~21 files**  | **~1,205**  | **~105**      | **Low**              |
### 3.9.2 Detailed File Impact

```mermaid
pie title Code Changes by Component
    "New Telemetry Module" : 800
    "Transaction Relay" : 160
    "Consensus" : 130
    "RPC Layer" : 100
    "Application Init" : 35
    "Protocol Buffers" : 25
    "Build System" : 60
```

#### New Files (No Impact on Existing Code)

| File                                           | Lines | Purpose              |
| ---------------------------------------------- | ----- | -------------------- |
| `include/xrpl/telemetry/Telemetry.h`           | ~160  | Main interface       |
| `include/xrpl/telemetry/SpanGuard.h`           | ~120  | RAII wrapper         |
| `include/xrpl/telemetry/TraceContext.h`        | ~80   | Context propagation  |
| `src/xrpld/telemetry/TracingInstrumentation.h` | ~60   | Macros               |
| `src/libxrpl/telemetry/Telemetry.cpp`          | ~200  | Implementation       |
| `src/libxrpl/telemetry/TelemetryConfig.cpp`    | ~60   | Config parsing       |
| `src/libxrpl/telemetry/NullTelemetry.cpp`      | ~40   | No-op implementation |

#### Modified Files (Existing Rippled Code)

| File                                              | Lines Added | Lines Changed | Risk Level |
| ------------------------------------------------- | ----------- | ------------- | ---------- |
| `src/xrpld/app/main/Application.cpp`              | ~15         | ~3            | Low        |
| `include/xrpl/app/main/Application.h`             | ~5          | ~2            | Low        |
| `src/xrpld/rpc/detail/ServerHandler.cpp`          | ~40         | ~10           | Low        |
| `src/xrpld/rpc/handlers/*.cpp`                    | ~30         | ~8            | Low        |
| `src/xrpld/overlay/detail/PeerImp.cpp`            | ~60         | ~15           | Medium     |
| `src/xrpld/overlay/detail/OverlayImpl.cpp`        | ~30         | ~10           | Medium     |
| `src/xrpld/app/consensus/RCLConsensus.cpp`        | ~50         | ~15           | Medium     |
| `src/xrpld/app/consensus/RCLConsensusAdaptor.cpp` | ~40         | ~12           | Medium     |
| `src/xrpld/core/JobQueue.cpp`                     | ~20         | ~5            | Low        |
| `src/xrpld/overlay/detail/ripple.proto`           | ~25         | 0             | Low        |
| `CMakeLists.txt`                                  | ~40         | ~8            | Low        |
| `cmake/FindOpenTelemetry.cmake`                   | ~50         | 0             | None (new) |
### 3.9.3 Risk Assessment by Component

<div align="center">

**Do First** ↖ ↗ **Plan Carefully**

```mermaid
quadrantChart
    title Code Intrusiveness Risk Matrix
    x-axis Low Risk --> High Risk
    y-axis Low Value --> High Value

    RPC Tracing: [0.2, 0.8]
    Transaction Relay: [0.5, 0.9]
    Consensus Tracing: [0.7, 0.95]
    Peer Message Tracing: [0.8, 0.4]
    JobQueue Context: [0.4, 0.5]
    Ledger Acquisition: [0.5, 0.6]
```

**Optional** ↙ ↘ **Avoid**

</div>

#### Risk Level Definitions

| Risk Level | Definition                                                        | Mitigation                         |
| ---------- | ----------------------------------------------------------------- | ---------------------------------- |
| **Low**    | Additive changes only; no modification to existing logic          | Standard code review               |
| **Medium** | Minor modifications to existing functions; clear boundaries       | Comprehensive unit tests           |
| **High**   | Changes to core logic or data structures; potential side effects  | Integration tests + staged rollout |
### 3.9.4 Architectural Impact Assessment

| Aspect               | Impact  | Justification                                                          |
| -------------------- | ------- | ---------------------------------------------------------------------- |
| **Data Flow**        | None    | Tracing is purely observational; no business logic changes            |
| **Threading Model**  | Minimal | Context propagation uses thread-local storage (standard OTel pattern) |
| **Memory Model**     | Low     | Bounded queues prevent unbounded growth; RAII ensures cleanup         |
| **Network Protocol** | Low     | Optional fields in protobuf (high field numbers); backward compatible |
| **Configuration**    | None    | New config section; existing configs unaffected                       |
| **Build System**     | Low     | Optional CMake flag; builds work without OpenTelemetry                |
| **Dependencies**     | Low     | OpenTelemetry SDK is optional; null implementation when disabled      |
### 3.9.5 Backward Compatibility

| Compatibility   | Status | Notes                                                 |
| --------------- | ------ | ----------------------------------------------------- |
| **Config File** | ✅ Full | New `[telemetry]` section is optional                 |
| **Protocol**    | ✅ Full | Optional protobuf fields with high field numbers      |
| **Build**       | ✅ Full | `XRPL_ENABLE_TELEMETRY=OFF` produces identical binary |
| **Runtime**     | ✅ Full | `enabled=0` produces zero overhead                    |
| **API**         | ✅ Full | No changes to public RPC or P2P APIs                  |
### 3.9.6 Rollback Strategy

If issues are discovered after deployment:

1. **Immediate**: Set `enabled=0` in config and restart (zero code change)
2. **Quick**: Rebuild with `XRPL_ENABLE_TELEMETRY=OFF`
3. **Complete**: Revert telemetry commits (clean separation makes this easy)
### 3.9.7 Code Change Examples

**Minimal RPC Instrumentation (Low Intrusiveness):**
```cpp
// Before
void ServerHandler::onRequest(...) {
    auto result = processRequest(req);
    send(result);
}

// After (only ~10 lines added)
void ServerHandler::onRequest(...) {
    XRPL_TRACE_RPC(app_.getTelemetry(), "rpc.request");  // +1 line
    XRPL_TRACE_SET_ATTR("xrpl.rpc.command", command);    // +1 line

    auto result = processRequest(req);

    XRPL_TRACE_SET_ATTR("xrpl.rpc.status", status);      // +1 line
    send(result);
}
```

**Consensus Instrumentation (Medium Intrusiveness):**
```cpp
// Before
void RCLConsensusAdaptor::startRound(...) {
    // ... existing logic
}

// After (context storage required)
void RCLConsensusAdaptor::startRound(...) {
    XRPL_TRACE_CONSENSUS(app_.getTelemetry(), "consensus.round");
    XRPL_TRACE_SET_ATTR("xrpl.consensus.ledger.seq", seq);

    // Store context for child spans in phase transitions
    currentRoundContext_ = _xrpl_guard_->context();  // New member variable

    // ... existing logic unchanged
}
```
---

*Previous: [Design Decisions](./02-design-decisions.md)* | *Next: [Code Samples](./04-code-samples.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
`OpenTelemetryPlan/04-code-samples.md` (new file, 982 lines)
@@ -0,0 +1,982 @@
# Code Samples

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Implementation Strategy](./03-implementation-strategy.md) | [Configuration Reference](./05-configuration-reference.md)

---

## 4.1 Core Interfaces

### 4.1.1 Main Telemetry Interface
```cpp
// include/xrpl/telemetry/Telemetry.h
#pragma once

#include <xrpl/telemetry/TelemetryConfig.h>
#include <xrpl/basics/BasicConfig.h>         // added: Section
#include <xrpl/beast/utility/Journal.h>      // added: beast::Journal
#include <opentelemetry/trace/tracer.h>
#include <opentelemetry/trace/span.h>
#include <opentelemetry/context/context.h>

#include <chrono>    // added: std::chrono::milliseconds
#include <cstdint>   // added: std::uint32_t
#include <memory>
#include <string>
#include <string_view>

namespace xrpl {
namespace telemetry {

/**
 * Main telemetry interface for OpenTelemetry integration.
 *
 * This class provides the primary API for distributed tracing in rippled.
 * It manages the OpenTelemetry SDK lifecycle and provides convenience
 * methods for creating spans and propagating context.
 */
class Telemetry
{
public:
    /**
     * Configuration for the telemetry system.
     */
    struct Setup
    {
        bool enabled = false;
        std::string serviceName = "rippled";
        std::string serviceVersion;
        std::string serviceInstanceId;  // Node public key

        // Exporter configuration
        std::string exporterType = "otlp_grpc";  // "otlp_grpc", "otlp_http", "none"
        std::string exporterEndpoint = "localhost:4317";
        bool useTls = false;
        std::string tlsCertPath;

        // Sampling configuration
        double samplingRatio = 1.0;  // 1.0 = 100% sampling

        // Batch processor settings
        std::uint32_t batchSize = 512;
        std::chrono::milliseconds batchDelay{5000};
        std::uint32_t maxQueueSize = 2048;

        // Network attributes
        std::uint32_t networkId = 0;
        std::string networkType = "mainnet";

        // Component filtering
        bool traceTransactions = true;
        bool traceConsensus = true;
        bool traceRpc = true;
        bool tracePeer = false;  // High volume, disabled by default
        bool traceLedger = true;
    };

    virtual ~Telemetry() = default;

    // ═══════════════════════════════════════════════════════════════════════
    // LIFECYCLE
    // ═══════════════════════════════════════════════════════════════════════

    /** Start the telemetry system (call after configuration) */
    virtual void start() = 0;

    /** Stop the telemetry system (flushes pending spans) */
    virtual void stop() = 0;

    /** Check if telemetry is enabled */
    virtual bool isEnabled() const = 0;

    // ═══════════════════════════════════════════════════════════════════════
    // TRACER ACCESS
    // ═══════════════════════════════════════════════════════════════════════

    /** Get the tracer for creating spans */
    virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer>
    getTracer(std::string_view name = "rippled") = 0;

    // ═══════════════════════════════════════════════════════════════════════
    // SPAN CREATION (Convenience Methods)
    // ═══════════════════════════════════════════════════════════════════════

    /** Start a new span with default options */
    virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
    startSpan(
        std::string_view name,
        opentelemetry::trace::SpanKind kind =
            opentelemetry::trace::SpanKind::kInternal) = 0;

    /** Start a span as child of given context */
    virtual opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>
    startSpan(
        std::string_view name,
        opentelemetry::context::Context const& parentContext,
        opentelemetry::trace::SpanKind kind =
            opentelemetry::trace::SpanKind::kInternal) = 0;

    // ═══════════════════════════════════════════════════════════════════════
    // CONTEXT PROPAGATION
    // ═══════════════════════════════════════════════════════════════════════

    /** Serialize context for network transmission */
    virtual std::string serializeContext(
        opentelemetry::context::Context const& ctx) = 0;

    /** Deserialize context from network data */
    virtual opentelemetry::context::Context deserializeContext(
        std::string const& serialized) = 0;

    // ═══════════════════════════════════════════════════════════════════════
    // COMPONENT FILTERING
    // ═══════════════════════════════════════════════════════════════════════

    /** Check if transaction tracing is enabled */
    virtual bool shouldTraceTransactions() const = 0;

    /** Check if consensus tracing is enabled */
    virtual bool shouldTraceConsensus() const = 0;

    /** Check if RPC tracing is enabled */
    virtual bool shouldTraceRpc() const = 0;

    /** Check if peer message tracing is enabled */
    virtual bool shouldTracePeer() const = 0;
};

// Factory functions
std::unique_ptr<Telemetry>
make_Telemetry(
    Telemetry::Setup const& setup,
    beast::Journal journal);

Telemetry::Setup
setup_Telemetry(
    Section const& section,
    std::string const& nodePublicKey,
    std::string const& version);

}  // namespace telemetry
}  // namespace xrpl
```
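A hedged usage sketch for the interface above (the config plumbing is simplified; `config.section("telemetry")` stands in for however the `[telemetry]` stanza is actually read, and the variables are illustrative):

```cpp
// Hedged sketch: start telemetry at application startup, trace one
// operation, and flush on shutdown.
using namespace xrpl::telemetry;

auto setup = setup_Telemetry(
    config.section("telemetry"), nodePublicKeyBase58, versionString);
auto telemetry = make_Telemetry(setup, journal);

telemetry->start();
{
    auto span = telemetry->startSpan("tx.validate");
    span->SetAttribute("xrpl.tx.hash", txHashHex);
    span->End();
}
telemetry->stop();  // flushes pending spans
```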
---

## 4.2 RAII Span Guard
```cpp
|
||||
// include/xrpl/telemetry/SpanGuard.h
|
||||
#pragma once
|
||||
|
||||
#include <opentelemetry/trace/span.h>
|
||||
#include <opentelemetry/trace/scope.h>
|
||||
#include <opentelemetry/trace/status.h>
|
||||
|
||||
#include <string_view>
|
||||
#include <exception>
|
||||
|
||||
namespace xrpl {
|
||||
namespace telemetry {
|
||||
|
||||
/**
|
||||
* RAII guard for OpenTelemetry spans.
|
||||
*
|
||||
* Automatically ends the span on destruction and makes it the current
|
||||
* span in the thread-local context.
|
||||
*/
|
||||
class SpanGuard
|
||||
{
|
||||
opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span_;
|
||||
opentelemetry::trace::Scope scope_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* Construct guard with span.
|
||||
* The span becomes the current span in thread-local context.
|
||||
*/
|
||||
explicit SpanGuard(
|
||||
opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span> span)
|
||||
: span_(std::move(span))
|
||||
, scope_(span_)
|
||||
{
|
||||
}
|
||||
|
||||
// Non-copyable, non-movable
|
||||
SpanGuard(SpanGuard const&) = delete;
|
||||
SpanGuard& operator=(SpanGuard const&) = delete;
|
||||
SpanGuard(SpanGuard&&) = delete;
|
||||
SpanGuard& operator=(SpanGuard&&) = delete;
|
||||
|
||||
~SpanGuard()
|
||||
{
|
||||
if (span_)
|
||||
span_->End();
|
||||
}
|
||||
|
||||
/** Access the underlying span */
|
||||
opentelemetry::trace::Span& span() { return *span_; }
|
||||
opentelemetry::trace::Span const& span() const { return *span_; }
|
||||
|
||||
/** Set span status to OK */
|
||||
void setOk()
|
||||
{
|
||||
span_->SetStatus(opentelemetry::trace::StatusCode::kOk);
|
||||
}
|
||||
|
||||
/** Set span status with code and description */
|
||||
void setStatus(
|
||||
opentelemetry::trace::StatusCode code,
|
||||
std::string_view description = "")
|
||||
{
|
||||
span_->SetStatus(code, std::string(description));
|
||||
}
|
||||
|
||||
/** Set an attribute on the span */
|
||||
template<typename T>
|
||||
void setAttribute(std::string_view key, T&& value)
|
||||
{
|
||||
span_->SetAttribute(
|
||||
opentelemetry::nostd::string_view(key.data(), key.size()),
|
||||
std::forward<T>(value));
|
||||
}
|
||||
|
||||
/** Add an event to the span */
|
||||
void addEvent(std::string_view name)
|
||||
{
|
||||
span_->AddEvent(std::string(name));
|
||||
}
|
||||
|
||||
/** Record an exception on the span */
|
||||
void recordException(std::exception const& e)
|
||||
{
|
||||
span_->RecordException(e);
|
||||
span_->SetStatus(
|
||||
opentelemetry::trace::StatusCode::kError,
|
||||
e.what());
|
||||
}
|
||||
|
||||
/** Get the current trace context */
|
||||
opentelemetry::context::Context context() const
|
||||
{
|
||||
return opentelemetry::context::RuntimeContext::GetCurrent();
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* No-op span guard for when tracing is disabled.
|
||||
* Provides the same interface but does nothing.
|
||||
*/
|
||||
class NullSpanGuard
|
||||
{
|
||||
public:
|
||||
NullSpanGuard() = default;
|
||||
|
||||
void setOk() {}
|
||||
void setStatus(opentelemetry::trace::StatusCode, std::string_view = "") {}
|
||||
|
||||
template<typename T>
|
||||
void setAttribute(std::string_view, T&&) {}
|
||||
|
||||
void addEvent(std::string_view) {}
|
||||
void recordException(std::exception const&) {}
|
||||
};
|
||||
|
||||
} // namespace telemetry
|
||||
} // namespace xrpl
|
||||
```
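
For illustration, a hypothetical call site might look like this (a minimal sketch; the function name, ledger sequence, and attribute values are invented for the example):

```cpp
// Hypothetical call site: trace one unit of work with RAII cleanup.
void fetchLedger(xrpl::telemetry::Telemetry& telemetry)
{
    xrpl::telemetry::SpanGuard guard(telemetry.startSpan("ledger.fetch"));
    guard.setAttribute("xrpl.ledger.seq", static_cast<int64_t>(75000000));

    try
    {
        // ... do the actual work ...
        guard.setOk();
    }
    catch (std::exception const& e)
    {
        guard.recordException(e); // marks the span as failed
        throw;                    // the span still ends via ~SpanGuard
    }
}
```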

---

## 4.3 Instrumentation Macros

```cpp
// src/xrpld/telemetry/TracingInstrumentation.h
#pragma once

#include <xrpl/telemetry/SpanGuard.h>
#include <xrpl/telemetry/Telemetry.h>

#include <optional>

namespace xrpl {
namespace telemetry {

// ═══════════════════════════════════════════════════════════════════════════
// INSTRUMENTATION MACROS
// ═══════════════════════════════════════════════════════════════════════════

#ifdef XRPL_ENABLE_TELEMETRY

// Start a span that is automatically ended when the guard goes out of scope.
// All span macros declare the guard as std::optional<SpanGuard> so that
// XRPL_TRACE_SET_ATTR and XRPL_TRACE_EXCEPTION below work with any of them.
#define XRPL_TRACE_SPAN(telemetry, name)                       \
    std::optional<::xrpl::telemetry::SpanGuard> _xrpl_guard_;  \
    _xrpl_guard_.emplace((telemetry).startSpan(name))

// Start a span with a specific kind
#define XRPL_TRACE_SPAN_KIND(telemetry, name, kind)            \
    std::optional<::xrpl::telemetry::SpanGuard> _xrpl_guard_;  \
    _xrpl_guard_.emplace((telemetry).startSpan(name, kind))

// Conditional span based on component
#define XRPL_TRACE_TX(telemetry, name)                         \
    std::optional<::xrpl::telemetry::SpanGuard> _xrpl_guard_;  \
    if ((telemetry).shouldTraceTransactions()) {               \
        _xrpl_guard_.emplace((telemetry).startSpan(name));     \
    }

#define XRPL_TRACE_CONSENSUS(telemetry, name)                  \
    std::optional<::xrpl::telemetry::SpanGuard> _xrpl_guard_;  \
    if ((telemetry).shouldTraceConsensus()) {                  \
        _xrpl_guard_.emplace((telemetry).startSpan(name));     \
    }

#define XRPL_TRACE_RPC(telemetry, name)                        \
    std::optional<::xrpl::telemetry::SpanGuard> _xrpl_guard_;  \
    if ((telemetry).shouldTraceRpc()) {                        \
        _xrpl_guard_.emplace((telemetry).startSpan(name));     \
    }

// Set attribute on current span (if one exists)
#define XRPL_TRACE_SET_ATTR(key, value)                        \
    if (_xrpl_guard_.has_value()) {                            \
        _xrpl_guard_->setAttribute(key, value);                \
    }

// Record exception on current span
#define XRPL_TRACE_EXCEPTION(e)                                \
    if (_xrpl_guard_.has_value()) {                            \
        _xrpl_guard_->recordException(e);                      \
    }

#else // XRPL_ENABLE_TELEMETRY not defined

#define XRPL_TRACE_SPAN(telemetry, name) ((void)0)
#define XRPL_TRACE_SPAN_KIND(telemetry, name, kind) ((void)0)
#define XRPL_TRACE_TX(telemetry, name) ((void)0)
#define XRPL_TRACE_CONSENSUS(telemetry, name) ((void)0)
#define XRPL_TRACE_RPC(telemetry, name) ((void)0)
#define XRPL_TRACE_SET_ATTR(key, value) ((void)0)
#define XRPL_TRACE_EXCEPTION(e) ((void)0)

#endif // XRPL_ENABLE_TELEMETRY

} // namespace telemetry
} // namespace xrpl
```
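
As a usage sketch (the handler and the `Request` type are placeholders, not existing rippled symbols; `getTelemetry()` is the `Application` extension proposed in section 5.3):

```cpp
// Hypothetical handler showing the macros end to end.
void processLedgerRequest(Application& app, Request const& req)
{
    XRPL_TRACE_RPC(app.getTelemetry(), "rpc.command.ledger");
    XRPL_TRACE_SET_ATTR("xrpl.rpc.command", "ledger");

    try
    {
        // ... existing handler logic ...
    }
    catch (std::exception const& e)
    {
        XRPL_TRACE_EXCEPTION(e);
        throw;
    }
}
```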

---

## 4.4 Protocol Buffer Extensions

### 4.4.1 TraceContext Message Definition

Add to `src/xrpld/overlay/detail/ripple.proto`:

```protobuf
// Trace context for distributed tracing across nodes
// Uses W3C Trace Context format internally
message TraceContext {
    // 16-byte trace identifier (required for valid context)
    bytes trace_id = 1;

    // 8-byte span identifier of parent span
    bytes span_id = 2;

    // Trace flags (bit 0 = sampled)
    uint32 trace_flags = 3;

    // W3C tracestate header value for vendor-specific data
    string trace_state = 4;
}

// Extend existing messages with optional trace context
// High field numbers (1000+) to avoid conflicts

message TMTransaction {
    // ... existing fields ...

    // Optional trace context for distributed tracing
    optional TraceContext trace_context = 1001;
}

message TMProposeSet {
    // ... existing fields ...
    optional TraceContext trace_context = 1001;
}

message TMValidation {
    // ... existing fields ...
    optional TraceContext trace_context = 1001;
}

message TMGetLedger {
    // ... existing fields ...
    optional TraceContext trace_context = 1001;
}

message TMLedgerData {
    // ... existing fields ...
    optional TraceContext trace_context = 1001;
}
```

### 4.4.2 Context Serialization/Deserialization

```cpp
// include/xrpl/telemetry/TraceContext.h
#pragma once

#include <opentelemetry/context/context.h>
#include <opentelemetry/trace/context.h>      // GetSpan, kSpanKey
#include <opentelemetry/trace/default_span.h>
#include <opentelemetry/trace/span_context.h>

#include <protocol/messages.h> // Generated protobuf

#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <string_view>

namespace xrpl {
namespace telemetry {

/**
 * Utilities for trace context serialization and propagation.
 */
class TraceContextPropagator
{
public:
    /**
     * Extract trace context from Protocol Buffer message.
     * Returns empty context if no valid trace info present.
     */
    static opentelemetry::context::Context
    extract(protocol::TraceContext const& proto);

    /**
     * Inject current trace context into Protocol Buffer message.
     */
    static void
    inject(
        opentelemetry::context::Context const& ctx,
        protocol::TraceContext& proto);

    /**
     * Extract trace context from HTTP headers (for RPC).
     * Supports W3C Trace Context (traceparent, tracestate).
     */
    static opentelemetry::context::Context
    extractFromHeaders(
        std::function<std::optional<std::string>(std::string_view)> headerGetter);

    /**
     * Inject trace context into HTTP headers (for RPC responses).
     */
    static void
    injectToHeaders(
        opentelemetry::context::Context const& ctx,
        std::function<void(std::string_view, std::string_view)> headerSetter);
};

// ═══════════════════════════════════════════════════════════════════════════
// IMPLEMENTATION
// ═══════════════════════════════════════════════════════════════════════════

inline opentelemetry::context::Context
TraceContextPropagator::extract(protocol::TraceContext const& proto)
{
    using namespace opentelemetry::trace;

    if (proto.trace_id().size() != 16 || proto.span_id().size() != 8)
        return opentelemetry::context::Context{}; // Invalid, return empty

    // Construct TraceId and SpanId from bytes (the constructors take
    // fixed-extent spans, not raw pointers)
    TraceId traceId(opentelemetry::nostd::span<uint8_t const, TraceId::kSize>(
        reinterpret_cast<uint8_t const*>(proto.trace_id().data()),
        TraceId::kSize));
    SpanId spanId(opentelemetry::nostd::span<uint8_t const, SpanId::kSize>(
        reinterpret_cast<uint8_t const*>(proto.span_id().data()),
        SpanId::kSize));
    TraceFlags flags(static_cast<uint8_t>(proto.trace_flags()));

    // Create SpanContext from extracted data
    SpanContext spanContext(traceId, spanId, flags, /* remote = */ true);

    // Create context with extracted span as parent
    return opentelemetry::context::Context{}.SetValue(
        opentelemetry::trace::kSpanKey,
        opentelemetry::nostd::shared_ptr<Span>(
            new DefaultSpan(spanContext)));
}

inline void
TraceContextPropagator::inject(
    opentelemetry::context::Context const& ctx,
    protocol::TraceContext& proto)
{
    using namespace opentelemetry::trace;

    // Get current span from context
    auto span = GetSpan(ctx);
    if (!span)
        return;

    auto const& spanCtx = span->GetContext();
    if (!spanCtx.IsValid())
        return;

    // Serialize trace_id (16 bytes)
    auto const& traceId = spanCtx.trace_id();
    proto.set_trace_id(traceId.Id().data(), TraceId::kSize);

    // Serialize span_id (8 bytes)
    auto const& spanId = spanCtx.span_id();
    proto.set_span_id(spanId.Id().data(), SpanId::kSize);

    // Serialize flags
    proto.set_trace_flags(spanCtx.trace_flags().flags());

    // Note: tracestate not implemented yet
}

} // namespace telemetry
} // namespace xrpl
```
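
Tying 4.4.1 and 4.4.2 together, a hypothetical round trip might look like this (both sides shown in one function for brevity; `app` follows the `Application` extension in section 5.3):

```cpp
// Illustrative round trip: inject on the sending node, extract on the peer.
void relayWithContext(Application& app, protocol::TMTransaction& msg)
{
    // Sender side: attach the active trace context before relaying.
    telemetry::TraceContextPropagator::inject(
        opentelemetry::context::RuntimeContext::GetCurrent(),
        *msg.mutable_trace_context());

    // Receiver side (on the peer): restore the remote context and
    // parent a local span on it.
    auto parentCtx =
        telemetry::TraceContextPropagator::extract(msg.trace_context());
    auto span = app.getTelemetry().startSpan(
        "tx.receive", parentCtx, opentelemetry::trace::SpanKind::kServer);
    telemetry::SpanGuard guard(span);
}
```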

---

## 4.5 Module-Specific Instrumentation

### 4.5.1 Transaction Relay Instrumentation

```cpp
// src/xrpld/overlay/detail/PeerImp.cpp (modified)

#include <xrpl/telemetry/TracingInstrumentation.h>

void
PeerImp::handleTransaction(
    std::shared_ptr<protocol::TMTransaction> const& m)
{
    // Extract trace context from incoming message
    opentelemetry::context::Context parentCtx;
    if (m->has_trace_context())
    {
        parentCtx = telemetry::TraceContextPropagator::extract(
            m->trace_context());
    }

    // Start span as child of remote span (cross-node link)
    auto span = app_.getTelemetry().startSpan(
        "tx.receive",
        parentCtx,
        opentelemetry::trace::SpanKind::kServer);
    telemetry::SpanGuard guard(span);

    try
    {
        // Parse and validate transaction
        SerialIter sit(makeSlice(m->rawtransaction()));
        auto stx = std::make_shared<STTx const>(sit);

        // Add transaction attributes (the enum is cast to a supported
        // attribute type)
        guard.setAttribute("xrpl.tx.hash", to_string(stx->getTransactionID()));
        guard.setAttribute(
            "xrpl.tx.type", static_cast<int64_t>(stx->getTxnType()));
        guard.setAttribute("xrpl.peer.id", remote_address_.to_string());

        // Check if we've seen this transaction (HashRouter)
        auto const [flags, suppressed] =
            app_.getHashRouter().addSuppressionPeer(
                stx->getTransactionID(),
                id_);

        if (suppressed)
        {
            guard.setAttribute("xrpl.tx.suppressed", true);
            guard.addEvent("tx.duplicate");
            return; // Already processing this transaction
        }

        // Create child span for validation
        {
            auto validateSpan = app_.getTelemetry().startSpan("tx.validate");
            telemetry::SpanGuard validateGuard(validateSpan);

            auto [validity, reason] = checkTransaction(stx);
            validateGuard.setAttribute(
                "xrpl.tx.validity",
                validity == Validity::Valid ? "valid" : "invalid");

            if (validity != Validity::Valid)
            {
                validateGuard.setStatus(
                    opentelemetry::trace::StatusCode::kError,
                    reason);
                return;
            }
        }

        // Relay to other peers (capture context for propagation)
        auto ctx = guard.context();

        // Create child span for relay
        auto relaySpan = app_.getTelemetry().startSpan(
            "tx.relay",
            ctx,
            opentelemetry::trace::SpanKind::kClient);
        {
            telemetry::SpanGuard relayGuard(relaySpan);

            // Inject context into outgoing message
            protocol::TraceContext protoCtx;
            telemetry::TraceContextPropagator::inject(
                relayGuard.context(), protoCtx);

            // Relay to other peers; `exclusions` and `relayCount` come from
            // the surrounding (elided) relay logic
            app_.overlay().relay(
                stx->getTransactionID(),
                *m,
                protoCtx, // Pass trace context
                exclusions);

            relayGuard.setAttribute(
                "xrpl.tx.relay_count", static_cast<int64_t>(relayCount));
        }

        guard.setOk();
    }
    catch (std::exception const& e)
    {
        guard.recordException(e);
        JLOG(journal_.warn()) << "Transaction handling failed: " << e.what();
    }
}
```

### 4.5.2 Consensus Instrumentation

```cpp
// src/xrpld/app/consensus/RCLConsensus.cpp (modified)

#include <xrpl/telemetry/TracingInstrumentation.h>

void
RCLConsensusAdaptor::startRound(
    NetClock::time_point const& now,
    RCLCxLedger::ID const& prevLedgerHash,
    RCLCxLedger const& prevLedger,
    hash_set<NodeID> const& peers,
    bool proposing)
{
    XRPL_TRACE_CONSENSUS(app_.getTelemetry(), "consensus.round");

    XRPL_TRACE_SET_ATTR("xrpl.consensus.ledger.prev", to_string(prevLedgerHash));
    XRPL_TRACE_SET_ATTR(
        "xrpl.consensus.ledger.seq",
        static_cast<int64_t>(prevLedger.seq() + 1));
    XRPL_TRACE_SET_ATTR(
        "xrpl.consensus.proposers", static_cast<int64_t>(peers.size()));
    XRPL_TRACE_SET_ATTR(
        "xrpl.consensus.mode", proposing ? "proposing" : "observing");

#ifdef XRPL_ENABLE_TELEMETRY
    // Store trace context for use in phase transitions
    // (guarded: _xrpl_guard_ only exists when telemetry is compiled in)
    currentRoundContext_ = _xrpl_guard_.has_value()
        ? _xrpl_guard_->context()
        : opentelemetry::context::Context{};
#endif

    // ... existing implementation ...
}

ConsensusPhase
RCLConsensusAdaptor::phaseTransition(ConsensusPhase newPhase)
{
    // Create span for phase transition
    auto span = app_.getTelemetry().startSpan(
        "consensus.phase." + to_string(newPhase),
        currentRoundContext_);
    telemetry::SpanGuard guard(span);

    guard.setAttribute("xrpl.consensus.phase", to_string(newPhase));
    guard.addEvent("phase.enter");

    auto const startTime = std::chrono::steady_clock::now();

    try
    {
        auto result = doPhaseTransition(newPhase);

        auto const duration = std::chrono::steady_clock::now() - startTime;
        guard.setAttribute(
            "xrpl.consensus.phase_duration_ms",
            std::chrono::duration<double, std::milli>(duration).count());

        guard.setOk();
        return result;
    }
    catch (std::exception const& e)
    {
        guard.recordException(e);
        throw;
    }
}

void
RCLConsensusAdaptor::peerProposal(
    NetClock::time_point const& now,
    RCLCxPeerPos const& proposal)
{
    // Extract trace context from proposal message
    opentelemetry::context::Context parentCtx;
    if (proposal.hasTraceContext())
    {
        parentCtx = telemetry::TraceContextPropagator::extract(
            proposal.traceContext());
    }

    auto span = app_.getTelemetry().startSpan(
        "consensus.proposal.receive",
        parentCtx,
        opentelemetry::trace::SpanKind::kServer);
    telemetry::SpanGuard guard(span);

    guard.setAttribute(
        "xrpl.consensus.proposer",
        toBase58(TokenType::NodePublic, proposal.nodeId()));
    guard.setAttribute(
        "xrpl.consensus.round",
        static_cast<int64_t>(proposal.proposal().proposeSeq()));

    // ... existing implementation ...

    guard.setOk();
}
```

### 4.5.3 RPC Handler Instrumentation

```cpp
// src/xrpld/rpc/detail/ServerHandler.cpp (modified)

#include <xrpl/telemetry/TracingInstrumentation.h>

void
ServerHandler::onRequest(
    http_request_type&& req,
    std::function<void(http_response_type&&)>&& send)
{
    // Extract trace context from HTTP headers (W3C Trace Context)
    auto parentCtx = telemetry::TraceContextPropagator::extractFromHeaders(
        [&req](std::string_view name) -> std::optional<std::string> {
            // Look the header up by name (Beast fields support string lookup)
            auto it = req.find(
                boost::beast::string_view(name.data(), name.size()));
            if (it != req.end())
                return std::string(it->value());
            return std::nullopt;
        });

    // Start request span
    auto span = app_.getTelemetry().startSpan(
        "rpc.request",
        parentCtx,
        opentelemetry::trace::SpanKind::kServer);
    telemetry::SpanGuard guard(span);

    // Add HTTP attributes
    guard.setAttribute("http.method", std::string(req.method_string()));
    guard.setAttribute("http.target", std::string(req.target()));
    guard.setAttribute(
        "http.user_agent",
        std::string(req[boost::beast::http::field::user_agent]));

    auto const startTime = std::chrono::steady_clock::now();

    try
    {
        // Parse and process request
        auto const& body = req.body();
        Json::Value jv;
        Json::Reader reader;

        if (!reader.parse(body, jv))
        {
            guard.setStatus(
                opentelemetry::trace::StatusCode::kError,
                "Invalid JSON");
            sendError(send, "Invalid JSON");
            return;
        }

        // Extract command name
        std::string command = jv.isMember("command")
            ? jv["command"].asString()
            : jv.isMember("method")
                ? jv["method"].asString()
                : "unknown";

        guard.setAttribute("xrpl.rpc.command", command);

        // Create child span for command execution
        auto cmdSpan = app_.getTelemetry().startSpan(
            "rpc.command." + command);
        {
            telemetry::SpanGuard cmdGuard(cmdSpan);

            // Execute RPC command
            auto result = processRequest(jv);

            // Record result attributes
            if (result.isMember("status"))
            {
                cmdGuard.setAttribute(
                    "xrpl.rpc.status", result["status"].asString());
            }

            if (result.isMember("status") &&
                result["status"].asString() == "error")
            {
                cmdGuard.setStatus(
                    opentelemetry::trace::StatusCode::kError,
                    result.isMember("error_message")
                        ? result["error_message"].asString()
                        : "RPC error");
            }
            else
            {
                cmdGuard.setOk();
            }
        }

        auto const duration = std::chrono::steady_clock::now() - startTime;
        guard.setAttribute(
            "http.duration_ms",
            std::chrono::duration<double, std::milli>(duration).count());

        // Inject trace context into response headers
        http_response_type resp;
        telemetry::TraceContextPropagator::injectToHeaders(
            guard.context(),
            [&resp](std::string_view name, std::string_view value) {
                resp.set(
                    boost::beast::string_view(name.data(), name.size()),
                    boost::beast::string_view(value.data(), value.size()));
            });

        guard.setOk();
        send(std::move(resp));
    }
    catch (std::exception const& e)
    {
        guard.recordException(e);
        JLOG(journal_.error()) << "RPC request failed: " << e.what();
        sendError(send, e.what());
    }
}
```

### 4.5.4 JobQueue Context Propagation

```cpp
// src/xrpld/core/JobQueue.h (modified)

#include <opentelemetry/context/context.h>
#include <opentelemetry/context/runtime_context.h>

class Job
{
    // ... existing members ...

    // Captured trace context at job creation
    opentelemetry::context::Context traceContext_;

public:
    // Constructor captures current trace context
    Job(JobType type, std::function<void()> func, ...)
        : type_(type)
        , func_(std::move(func))
        , traceContext_(opentelemetry::context::RuntimeContext::GetCurrent())
        // ... other initializations ...
    {
    }

    // Get trace context for restoration during execution
    opentelemetry::context::Context const&
    traceContext() const { return traceContext_; }
};

// src/xrpld/core/JobQueue.cpp (modified)

void
Worker::run()
{
    while (auto job = getJob())
    {
        // Restore trace context from job creation
        auto token = opentelemetry::context::RuntimeContext::Attach(
            job->traceContext());

        // Start execution span
        auto span = app_.getTelemetry().startSpan("job.execute");
        telemetry::SpanGuard guard(span);

        guard.setAttribute("xrpl.job.type", to_string(job->type()));
        guard.setAttribute("xrpl.job.queue_ms", job->queueTimeMs());
        guard.setAttribute("xrpl.job.worker", workerId_);

        try
        {
            job->execute();
            guard.setOk();
        }
        catch (std::exception const& e)
        {
            guard.recordException(e);
            JLOG(journal_.error()) << "Job execution failed: " << e.what();
        }
    }
}
```

---

## 4.6 Span Flow Visualization

<div align="center">

```mermaid
flowchart TB
    subgraph Client["External Client"]
        submit["Submit TX"]
    end

    subgraph NodeA["rippled Node A"]
        rpcA["rpc.request"]
        cmdA["rpc.command.submit"]
        txRecvA["tx.receive"]
        txValA["tx.validate"]
        txRelayA["tx.relay"]
    end

    subgraph NodeB["rippled Node B"]
        txRecvB["tx.receive"]
        txValB["tx.validate"]
        txRelayB["tx.relay"]
    end

    subgraph NodeC["rippled Node C"]
        txRecvC["tx.receive"]
        consensusC["consensus.round"]
        phaseC["consensus.phase.establish"]
    end

    submit --> rpcA
    rpcA --> cmdA
    cmdA --> txRecvA
    txRecvA --> txValA
    txValA --> txRelayA
    txRelayA -.->|"TraceContext"| txRecvB
    txRecvB --> txValB
    txValB --> txRelayB
    txRelayB -.->|"TraceContext"| txRecvC
    txRecvC --> consensusC
    consensusC --> phaseC

    style Client fill:#334155,stroke:#1e293b,color:#fff
    style NodeA fill:#1e3a8a,stroke:#172554,color:#fff
    style NodeB fill:#064e3b,stroke:#022c22,color:#fff
    style NodeC fill:#78350f,stroke:#451a03,color:#fff
    style submit fill:#e2e8f0,stroke:#cbd5e1,color:#1e293b
    style rpcA fill:#1d4ed8,stroke:#1e40af,color:#fff
    style cmdA fill:#1d4ed8,stroke:#1e40af,color:#fff
    style txRecvA fill:#047857,stroke:#064e3b,color:#fff
    style txValA fill:#047857,stroke:#064e3b,color:#fff
    style txRelayA fill:#047857,stroke:#064e3b,color:#fff
    style txRecvB fill:#047857,stroke:#064e3b,color:#fff
    style txValB fill:#047857,stroke:#064e3b,color:#fff
    style txRelayB fill:#047857,stroke:#064e3b,color:#fff
    style txRecvC fill:#047857,stroke:#064e3b,color:#fff
    style consensusC fill:#fef3c7,stroke:#fde68a,color:#1e293b
    style phaseC fill:#fef3c7,stroke:#fde68a,color:#1e293b
```

</div>

---

*Previous: [Implementation Strategy](./03-implementation-strategy.md)* | *Next: [Configuration Reference](./05-configuration-reference.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*

# Configuration Reference

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Code Samples](./04-code-samples.md) | [Implementation Phases](./06-implementation-phases.md)

---

## 5.1 rippled Configuration

### 5.1.1 Configuration File Section

Add to `cfg/xrpld-example.cfg`:

```ini
# ═══════════════════════════════════════════════════════════════════════════════
# TELEMETRY (OpenTelemetry Distributed Tracing)
# ═══════════════════════════════════════════════════════════════════════════════
#
# Enables distributed tracing for transaction flow, consensus, and RPC calls.
# Traces are exported to an OpenTelemetry Collector using the OTLP protocol.
#
# [telemetry]
#
#   # Enable/disable telemetry (default: 0 = disabled)
#   enabled=1
#
#   # Exporter type: "otlp_grpc" (default), "otlp_http", or "none"
#   exporter=otlp_grpc
#
#   # OTLP endpoint (default: localhost:4317 for gRPC, localhost:4318 for HTTP)
#   endpoint=localhost:4317
#
#   # Use TLS for exporter connection (default: 0)
#   use_tls=0
#
#   # Path to CA certificate for TLS (optional)
#   # tls_ca_cert=/path/to/ca.crt
#
#   # Sampling ratio: 0.0-1.0 (default: 1.0 = 100% sampling)
#   # Use lower values in production to reduce overhead
#   sampling_ratio=0.1
#
#   # Batch processor settings
#   batch_size=512        # Spans per batch (default: 512)
#   batch_delay_ms=5000   # Max delay before sending batch (default: 5000)
#   max_queue_size=2048   # Max queued spans (default: 2048)
#
#   # Component-specific tracing (default: all enabled except peer)
#   trace_transactions=1  # Transaction relay and processing
#   trace_consensus=1     # Consensus rounds and proposals
#   trace_rpc=1           # RPC request handling
#   trace_peer=0          # Peer messages (high volume, disabled by default)
#   trace_ledger=1        # Ledger acquisition and building
#
#   # Service identification (automatically detected if not specified)
#   # service_name=rippled
#   # service_instance_id=<node_public_key>

[telemetry]
enabled=0
```

### 5.1.2 Configuration Options Summary

| Option                | Type   | Default          | Description                                |
| --------------------- | ------ | ---------------- | ------------------------------------------ |
| `enabled`             | bool   | `false`          | Enable/disable telemetry                   |
| `exporter`            | string | `"otlp_grpc"`    | Exporter type: otlp_grpc, otlp_http, none  |
| `endpoint`            | string | `localhost:4317` | OTLP collector endpoint                    |
| `use_tls`             | bool   | `false`          | Enable TLS for exporter connection         |
| `tls_ca_cert`         | string | `""`             | Path to CA certificate file                |
| `sampling_ratio`      | float  | `1.0`            | Sampling ratio (0.0-1.0)                   |
| `batch_size`          | uint   | `512`            | Spans per export batch                     |
| `batch_delay_ms`      | uint   | `5000`           | Max delay before sending batch (ms)        |
| `max_queue_size`      | uint   | `2048`           | Maximum queued spans                       |
| `trace_transactions`  | bool   | `true`           | Enable transaction tracing                 |
| `trace_consensus`     | bool   | `true`           | Enable consensus tracing                   |
| `trace_rpc`           | bool   | `true`           | Enable RPC tracing                         |
| `trace_peer`          | bool   | `false`          | Enable peer message tracing (high volume)  |
| `trace_ledger`        | bool   | `true`           | Enable ledger tracing                      |
| `service_name`        | string | `"rippled"`      | Service name for traces                    |
| `service_instance_id` | string | `<node_pubkey>`  | Instance identifier                        |

---

## 5.2 Configuration Parser

```cpp
// src/libxrpl/telemetry/TelemetryConfig.cpp

#include <xrpl/basics/BasicConfig.h> // Section
#include <xrpl/basics/Log.h>
#include <xrpl/telemetry/Telemetry.h>

namespace xrpl {
namespace telemetry {

Telemetry::Setup
setup_Telemetry(
    Section const& section,
    std::string const& nodePublicKey,
    std::string const& version)
{
    Telemetry::Setup setup;

    // Basic settings
    setup.enabled = section.value_or("enabled", false);
    setup.serviceName = section.value_or("service_name", "rippled");
    setup.serviceVersion = version;
    setup.serviceInstanceId = section.value_or(
        "service_instance_id", nodePublicKey);

    // Exporter settings
    setup.exporterType = section.value_or("exporter", "otlp_grpc");

    if (setup.exporterType == "otlp_grpc")
        setup.exporterEndpoint = section.value_or("endpoint", "localhost:4317");
    else if (setup.exporterType == "otlp_http")
        setup.exporterEndpoint = section.value_or("endpoint", "localhost:4318");

    setup.useTls = section.value_or("use_tls", false);
    setup.tlsCertPath = section.value_or("tls_ca_cert", "");

    // Sampling
    setup.samplingRatio = section.value_or("sampling_ratio", 1.0);
    if (setup.samplingRatio < 0.0 || setup.samplingRatio > 1.0)
    {
        Throw<std::runtime_error>(
            "telemetry.sampling_ratio must be between 0.0 and 1.0");
    }

    // Batch processor
    setup.batchSize = section.value_or("batch_size", 512u);
    setup.batchDelay = std::chrono::milliseconds{
        section.value_or("batch_delay_ms", 5000u)};
    setup.maxQueueSize = section.value_or("max_queue_size", 2048u);

    // Component filtering
    setup.traceTransactions = section.value_or("trace_transactions", true);
    setup.traceConsensus = section.value_or("trace_consensus", true);
    setup.traceRpc = section.value_or("trace_rpc", true);
    setup.tracePeer = section.value_or("trace_peer", false);
    setup.traceLedger = section.value_or("trace_ledger", true);

    return setup;
}

} // namespace telemetry
} // namespace xrpl
```
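
A test-style sketch of the parser's behavior, assuming `Section::set(key, value)` as used elsewhere in rippled's unit tests; the node key and version strings are placeholders:

```cpp
#include <xrpl/basics/BasicConfig.h>
#include <xrpl/telemetry/Telemetry.h>

#include <cassert>
#include <cmath>

void testSetupTelemetryDefaults()
{
    Section section("telemetry");
    section.set("enabled", "1");
    section.set("sampling_ratio", "0.1");

    auto const setup = xrpl::telemetry::setup_Telemetry(
        section,
        "nH_placeholder_key", // placeholder node public key
        "0.0.0");             // placeholder version string

    assert(setup.enabled);
    assert(std::abs(setup.samplingRatio - 0.1) < 1e-9);
    assert(setup.exporterEndpoint == "localhost:4317"); // otlp_grpc default
    assert(!setup.tracePeer); // peer tracing off by default (high volume)
}
```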

---

## 5.3 Application Integration

### 5.3.1 ApplicationImp Changes

```cpp
// src/xrpld/app/main/Application.cpp (modified)

#include <xrpl/telemetry/Telemetry.h>

class ApplicationImp : public Application
{
    // ... existing members ...

    // Telemetry (must be constructed early, destroyed late)
    std::unique_ptr<telemetry::Telemetry> telemetry_;

public:
    ApplicationImp(...)
    {
        // Initialize telemetry early (before other components)
        auto telemetrySection = config_->section("telemetry");
        auto telemetrySetup = telemetry::setup_Telemetry(
            telemetrySection,
            toBase58(TokenType::NodePublic, nodeIdentity_.publicKey()),
            BuildInfo::getVersionString());

        // Set network attributes
        telemetrySetup.networkId = config_->NETWORK_ID;
        telemetrySetup.networkType = [&]() {
            if (config_->NETWORK_ID == 0) return "mainnet";
            if (config_->NETWORK_ID == 1) return "testnet";
            if (config_->NETWORK_ID == 2) return "devnet";
            return "custom";
        }();

        telemetry_ = telemetry::make_Telemetry(
            telemetrySetup,
            logs_->journal("Telemetry"));

        // ... rest of initialization ...
    }

    void start() override
    {
        // Start telemetry first
        if (telemetry_)
            telemetry_->start();

        // ... existing start code ...
    }

    void stop() override
    {
        // ... existing stop code ...

        // Stop telemetry last (to capture shutdown spans)
        if (telemetry_)
            telemetry_->stop();
    }

    telemetry::Telemetry& getTelemetry() override
    {
        assert(telemetry_);
        return *telemetry_;
    }
};
```

### 5.3.2 Application Interface Addition

```cpp
// include/xrpl/app/main/Application.h (modified)

namespace telemetry { class Telemetry; }

class Application
{
public:
    // ... existing virtual methods ...

    /** Get the telemetry system for distributed tracing */
    virtual telemetry::Telemetry& getTelemetry() = 0;
};
```

---

## 5.4 CMake Integration

### 5.4.1 Find OpenTelemetry Module

```cmake
# cmake/FindOpenTelemetry.cmake

# Find OpenTelemetry C++ SDK
#
# This module defines:
#   OpenTelemetry_FOUND               - System has OpenTelemetry
#   OpenTelemetry::api                - API library target
#   OpenTelemetry::sdk                - SDK library target
#   OpenTelemetry::otlp_grpc_exporter - OTLP gRPC exporter target
#   OpenTelemetry::otlp_http_exporter - OTLP HTTP exporter target

find_package(opentelemetry-cpp CONFIG QUIET)

if(opentelemetry-cpp_FOUND)
  set(OpenTelemetry_FOUND TRUE)

  # Create imported targets if not already created by config
  if(NOT TARGET OpenTelemetry::api)
    add_library(OpenTelemetry::api ALIAS opentelemetry-cpp::api)
  endif()
  if(NOT TARGET OpenTelemetry::sdk)
    add_library(OpenTelemetry::sdk ALIAS opentelemetry-cpp::sdk)
  endif()
  if(NOT TARGET OpenTelemetry::otlp_grpc_exporter)
    add_library(OpenTelemetry::otlp_grpc_exporter ALIAS
      opentelemetry-cpp::otlp_grpc_exporter)
  endif()
else()
  # Try pkg-config fallback
  find_package(PkgConfig QUIET)
  if(PKG_CONFIG_FOUND)
    pkg_check_modules(OTEL opentelemetry-cpp QUIET)
    if(OTEL_FOUND)
      set(OpenTelemetry_FOUND TRUE)
      # Create imported targets from pkg-config
      add_library(OpenTelemetry::api INTERFACE IMPORTED)
      target_include_directories(OpenTelemetry::api INTERFACE
        ${OTEL_INCLUDE_DIRS})
    endif()
  endif()
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(OpenTelemetry
  REQUIRED_VARS OpenTelemetry_FOUND)
```

### 5.4.2 CMakeLists.txt Changes

```cmake
# CMakeLists.txt (additions)

# ═══════════════════════════════════════════════════════════════════════════════
# TELEMETRY OPTIONS
# ═══════════════════════════════════════════════════════════════════════════════

option(XRPL_ENABLE_TELEMETRY
  "Enable OpenTelemetry distributed tracing support" OFF)

if(XRPL_ENABLE_TELEMETRY)
  find_package(OpenTelemetry REQUIRED)

  # Define compile-time flag
  add_compile_definitions(XRPL_ENABLE_TELEMETRY)

  message(STATUS "OpenTelemetry tracing: ENABLED")
else()
  message(STATUS "OpenTelemetry tracing: DISABLED")
endif()

# ═══════════════════════════════════════════════════════════════════════════════
# TELEMETRY LIBRARY
# ═══════════════════════════════════════════════════════════════════════════════

if(XRPL_ENABLE_TELEMETRY)
  add_library(xrpl_telemetry
    src/libxrpl/telemetry/Telemetry.cpp
    src/libxrpl/telemetry/TelemetryConfig.cpp
    src/libxrpl/telemetry/TraceContext.cpp
  )

  target_include_directories(xrpl_telemetry
    PUBLIC
      ${CMAKE_CURRENT_SOURCE_DIR}/include
  )

  target_link_libraries(xrpl_telemetry
    PUBLIC
      OpenTelemetry::api
      OpenTelemetry::sdk
      OpenTelemetry::otlp_grpc_exporter
    PRIVATE
      xrpl_basics
  )

  # Add to main library dependencies
  target_link_libraries(xrpld PRIVATE xrpl_telemetry)
else()
  # Create null implementation library
  add_library(xrpl_telemetry
    src/libxrpl/telemetry/NullTelemetry.cpp
  )
  target_include_directories(xrpl_telemetry
    PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include
  )
endif()
```

---

## 5.5 OpenTelemetry Collector Configuration

### 5.5.1 Development Configuration

```yaml
# otel-collector-dev.yaml
# Minimal configuration for local development

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
      http:
        endpoint: 0.0.0.0:4318

processors:
  batch:
    timeout: 1s
    send_batch_size: 100

exporters:
  # Console output for debugging
  logging:
    verbosity: detailed
    sampling_initial: 5
    sampling_thereafter: 200

  # Jaeger for trace visualization; recent collector releases dropped the
  # dedicated Jaeger exporter, so send OTLP to Jaeger's native OTLP endpoint
  otlp/jaeger:
    endpoint: jaeger:4317
    tls:
      insecure: true

service:
  pipelines:
    traces:
      receivers: [otlp]
      processors: [batch]
      exporters: [logging, otlp/jaeger]
```

### 5.5.2 Production Configuration

```yaml
# otel-collector-prod.yaml
# Production configuration with filtering, sampling, and multiple backends

receivers:
  otlp:
    protocols:
      grpc:
        endpoint: 0.0.0.0:4317
        tls:
          cert_file: /etc/otel/server.crt
          key_file: /etc/otel/server.key
          ca_file: /etc/otel/ca.crt

processors:
  # Memory limiter to prevent OOM
  memory_limiter:
    check_interval: 1s
    limit_mib: 1000
    spike_limit_mib: 200

  # Batch processing for efficiency
  batch:
    timeout: 5s
    send_batch_size: 512
    send_batch_max_size: 1024

  # Tail-based sampling (keep errors and slow traces)
  tail_sampling:
    decision_wait: 10s
    num_traces: 100000
    expected_new_traces_per_sec: 1000
    policies:
      # Always keep error traces
      - name: errors
        type: status_code
        status_code:
          status_codes: [ERROR]
      # Keep slow consensus rounds (>5s)
      - name: slow-consensus
        type: latency
        latency:
          threshold_ms: 5000
      # Keep slow RPC requests (>1s)
      - name: slow-rpc
        type: and
        and:
          and_sub_policy:
            - name: rpc-spans
              type: string_attribute
              string_attribute:
                key: xrpl.rpc.command
                values: [".*"]
                enabled_regex_matching: true
            - name: latency
              type: latency
              latency:
                threshold_ms: 1000
      # Probabilistic sampling for the rest
      - name: probabilistic
        type: probabilistic
        probabilistic:
          sampling_percentage: 10

  # Attribute processing
  attributes:
    actions:
      # Hash sensitive data
      - key: xrpl.tx.account
        action: hash
      # Add deployment info
      - key: deployment.environment
        value: production
        action: upsert

exporters:
  # Grafana Tempo for long-term storage
  otlp/tempo:
    endpoint: tempo.monitoring:4317
    tls:
      insecure: false
      ca_file: /etc/otel/tempo-ca.crt

  # Elastic APM for correlation with logs
  otlp/elastic:
    endpoint: apm.elastic:8200
    headers:
      Authorization: "Bearer ${ELASTIC_APM_TOKEN}"

extensions:
  health_check:
    endpoint: 0.0.0.0:13133
  zpages:
    endpoint: 0.0.0.0:55679

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [otlp]
      processors: [memory_limiter, tail_sampling, attributes, batch]
      exporters: [otlp/tempo, otlp/elastic]
```

---

## 5.6 Docker Compose Development Environment

```yaml
# docker-compose-telemetry.yaml
version: '3.8'

services:
  # OpenTelemetry Collector
  otel-collector:
    image: otel/opentelemetry-collector-contrib:0.92.0
    container_name: otel-collector
    command: ["--config=/etc/otel-collector-config.yaml"]
    volumes:
      - ./otel-collector-dev.yaml:/etc/otel-collector-config.yaml:ro
    ports:
      - "4317:4317"   # OTLP gRPC
      - "4318:4318"   # OTLP HTTP
      - "13133:13133" # Health check
    depends_on:
      - jaeger

  # Jaeger for trace visualization
  jaeger:
    image: jaegertracing/all-in-one:1.53
    container_name: jaeger
    environment:
      - COLLECTOR_OTLP_ENABLED=true
    ports:
      - "16686:16686" # UI
      - "14250:14250" # gRPC

  # Grafana for dashboards
  grafana:
    image: grafana/grafana:10.2.3
    container_name: grafana
    environment:
      - GF_AUTH_ANONYMOUS_ENABLED=true
      - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
    volumes:
      - ./grafana/provisioning:/etc/grafana/provisioning:ro
      - ./grafana/dashboards:/var/lib/grafana/dashboards:ro
    ports:
      - "3000:3000"
    depends_on:
      - jaeger

  # Prometheus for metrics (optional, for correlation)
  prometheus:
    image: prom/prometheus:v2.48.1
    container_name: prometheus
    volumes:
      - ./prometheus.yaml:/etc/prometheus/prometheus.yml:ro
    ports:
      - "9090:9090"

networks:
  default:
    name: rippled-telemetry
```

---

## 5.7 Configuration Architecture

```mermaid
flowchart TB
    subgraph config["Configuration Sources"]
        cfgFile["xrpld.cfg<br/>[telemetry] section"]
        cmake["CMake<br/>XRPL_ENABLE_TELEMETRY"]
    end

    subgraph init["Initialization"]
        parse["setup_Telemetry()"]
        factory["make_Telemetry()"]
    end

    subgraph runtime["Runtime Components"]
        tracer["TracerProvider"]
        exporter["OTLP Exporter"]
        processor["BatchProcessor"]
    end

    subgraph collector["Collector Pipeline"]
        recv["Receivers"]
        proc["Processors"]
        exp["Exporters"]
    end

    cfgFile --> parse
    cmake -->|"compile flag"| parse
    parse --> factory
    factory --> tracer
    tracer --> processor
    processor --> exporter
    exporter -->|"OTLP"| recv
    recv --> proc
    proc --> exp

    style config fill:#e3f2fd,stroke:#1976d2
    style runtime fill:#e8f5e9,stroke:#388e3c
    style collector fill:#fff3e0,stroke:#ff9800
```

---

## 5.8 Grafana Integration

Step-by-step instructions for integrating rippled traces with Grafana.

### 5.8.1 Data Source Configuration

#### Tempo (Recommended)

```yaml
# grafana/provisioning/datasources/tempo.yaml
apiVersion: 1

datasources:
  - name: Tempo
    type: tempo
    access: proxy
    url: http://tempo:3200
    jsonData:
      httpMethod: GET
      tracesToLogs:
        datasourceUid: loki
        tags: ['service.name', 'xrpl.tx.hash']
        mappedTags: [{ key: 'trace_id', value: 'traceID' }]
        mapTagNamesEnabled: true
        filterByTraceID: true
      serviceMap:
        datasourceUid: prometheus
      nodeGraph:
        enabled: true
      search:
        hide: false
      lokiSearch:
        datasourceUid: loki
```

#### Jaeger

```yaml
# grafana/provisioning/datasources/jaeger.yaml
apiVersion: 1

datasources:
  - name: Jaeger
    type: jaeger
    access: proxy
    url: http://jaeger:16686
    jsonData:
      tracesToLogs:
        datasourceUid: loki
        tags: ['service.name']
```

#### Elastic APM

```yaml
# grafana/provisioning/datasources/elastic-apm.yaml
apiVersion: 1

datasources:
  - name: Elasticsearch-APM
    type: elasticsearch
    access: proxy
    url: http://elasticsearch:9200
    database: "apm-*"
    jsonData:
      esVersion: "8.0.0"
      timeField: "@timestamp"
      logMessageField: message
      logLevelField: log.level
```

### 5.8.2 Dashboard Provisioning

```yaml
# grafana/provisioning/dashboards/dashboards.yaml
apiVersion: 1

providers:
  - name: 'rippled-dashboards'
    orgId: 1
    folder: 'rippled'
    folderUid: 'rippled'
    type: file
    disableDeletion: false
    updateIntervalSeconds: 30
    options:
      path: /var/lib/grafana/dashboards/rippled
```

### 5.8.3 Example Dashboard: RPC Performance

```json
{
  "title": "rippled RPC Performance",
  "uid": "rippled-rpc-performance",
  "panels": [
    {
      "title": "RPC Latency by Command",
      "type": "heatmap",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && span.xrpl.rpc.command != \"\"} | histogram_over_time(duration) by (span.xrpl.rpc.command)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 }
    },
    {
      "title": "RPC Error Rate",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && status.code=error} | rate() by (span.xrpl.rpc.command)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 }
    },
    {
      "title": "Top 10 Slowest RPC Commands",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && span.xrpl.rpc.command != \"\"} | avg(duration) by (span.xrpl.rpc.command) | topk(10)"
        }
      ],
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 8 }
    },
    {
      "title": "Recent Traces",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\"}"
        }
      ],
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 16 }
    }
  ]
}
```

### 5.8.4 Example Dashboard: Transaction Tracing

```json
{
  "title": "rippled Transaction Tracing",
  "uid": "rippled-tx-tracing",
  "panels": [
    {
      "title": "Transaction Throughput",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.receive\"} | rate()"
        }
      ],
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 0 }
    },
    {
      "title": "Cross-Node Relay Count",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.relay\"} | avg(span.xrpl.tx.relay_count)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 4 }
    },
    {
      "title": "Transaction Validation Errors",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.validate\" && status.code=error}"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 4 }
    }
  ]
}
```

### 5.8.5 TraceQL Query Examples

Common queries for rippled traces:

```
# Find all traces for a specific transaction hash
{resource.service.name="rippled" && span.xrpl.tx.hash="ABC123..."}

# Find slow RPC commands (>100ms)
{resource.service.name="rippled" && name=~"rpc.command.*"} | duration > 100ms

# Find consensus rounds taking >5 seconds
{resource.service.name="rippled" && name="consensus.round"} | duration > 5s

# Find failed transactions with error details
{resource.service.name="rippled" && name="tx.validate" && status.code=error}

# Find transactions relayed to many peers
{resource.service.name="rippled" && name="tx.relay"} | span.xrpl.tx.relay_count > 10

# Compare latency across nodes
{resource.service.name="rippled" && name="rpc.command.account_info"} | avg(duration) by (resource.service.instance.id)
```

### 5.8.6 Correlation with PerfLog

To correlate OpenTelemetry traces with existing PerfLog data:

**Step 1: Configure Loki to ingest PerfLog**

```yaml
# promtail-config.yaml
scrape_configs:
  - job_name: rippled-perflog
    static_configs:
      - targets:
          - localhost
        labels:
          job: rippled
          __path__: /var/log/rippled/perf*.log
    pipeline_stages:
      - json:
          expressions:
            trace_id: trace_id
            ledger_seq: ledger_seq
            tx_hash: tx_hash
      - labels:
          trace_id:
          ledger_seq:
          tx_hash:
```

**Step 2: Add trace_id to PerfLog entries**

Modify PerfLog to include trace_id when available:

```cpp
// In PerfLog output, add trace_id from current span context
void logPerf(Json::Value& entry) {
    auto span = opentelemetry::trace::GetSpan(
        opentelemetry::context::RuntimeContext::GetCurrent());
    if (span && span->GetContext().IsValid()) {
        // ToLowerBase16 expects a fixed 32-char buffer (2 chars per byte)
        char traceIdHex[32];
        span->GetContext().trace_id().ToLowerBase16(traceIdHex);
        entry["trace_id"] = std::string(traceIdHex, 32);
    }
    // ... existing logging
}
```

**Step 3: Configure Grafana trace-to-logs link**

In the Tempo data source configuration, set up the derived field:

```yaml
jsonData:
  tracesToLogs:
    datasourceUid: loki
    tags: ['trace_id', 'xrpl.tx.hash']
    filterByTraceID: true
    filterBySpanID: false
```

### 5.8.7 Correlation with Insight/StatsD Metrics

To correlate traces with existing Beast Insight metrics:

**Step 1: Export Insight metrics to Prometheus**

```yaml
# prometheus.yaml
scrape_configs:
  - job_name: 'rippled-statsd'
    static_configs:
      - targets: ['statsd-exporter:9102']
```

**Step 2: Add exemplars to metrics**

The OpenTelemetry SDK automatically adds exemplars (trace IDs) to metrics when using the Prometheus exporter. This links metric spikes to specific traces.

**Step 3: Configure Grafana metric-to-trace link**

```yaml
# In the Prometheus data source
jsonData:
  exemplarTraceIdDestinations:
    - name: trace_id
      datasourceUid: tempo
```

**Step 4: Dashboard panel with exemplars**

```json
{
  "title": "RPC Latency with Trace Links",
  "type": "timeseries",
  "datasource": "Prometheus",
  "targets": [
    {
      "expr": "histogram_quantile(0.99, rate(rippled_rpc_duration_seconds_bucket[5m]))",
      "exemplar": true
    }
  ]
}
```

This allows clicking on metric data points to jump directly to the related trace.

---

*Previous: [Code Samples](./04-code-samples.md)* | *Next: [Implementation Phases](./06-implementation-phases.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*

# Implementation Phases

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Configuration Reference](./05-configuration-reference.md) | [Observability Backends](./07-observability-backends.md)

---

## 6.1 Phase Overview

```mermaid
gantt
    title OpenTelemetry Implementation Timeline
    dateFormat YYYY-MM-DD
    axisFormat Week %W

    section Phase 1
    Core Infrastructure         :p1, 2024-01-01, 2w
    SDK Integration             :p1a, 2024-01-01, 4d
    Telemetry Interface         :p1b, after p1a, 3d
    Configuration & CMake       :p1c, after p1b, 3d
    Unit Tests                  :p1d, after p1c, 2d

    section Phase 2
    RPC Tracing                 :p2, after p1, 2w
    HTTP Context Extraction     :p2a, after p1, 2d
    RPC Handler Instrumentation :p2b, after p2a, 4d
    WebSocket Support           :p2c, after p2b, 2d
    Integration Tests           :p2d, after p2c, 2d

    section Phase 3
    Transaction Tracing         :p3, after p2, 2w
    Protocol Buffer Extension   :p3a, after p2, 2d
    PeerImp Instrumentation     :p3b, after p3a, 3d
    Relay Context Propagation   :p3c, after p3b, 3d
    Multi-node Tests            :p3d, after p3c, 2d

    section Phase 4
    Consensus Tracing           :p4, after p3, 2w
    Consensus Round Spans       :p4a, after p3, 3d
    Proposal Handling           :p4b, after p4a, 3d
    Validation Tests            :p4c, after p4b, 4d

    section Phase 5
    Documentation & Deploy      :p5, after p4, 1w
```
|
||||
|
||||
---
|
||||
|
||||
## 6.2 Phase 1: Core Infrastructure (Weeks 1-2)
|
||||
|
||||
**Objective**: Establish foundational telemetry infrastructure
|
||||
|
||||
### Tasks
|
||||
|
||||
| Task | Description | Effort | Risk |
|
||||
| ---- | ----------------------------------------------------- | ------ | ------ |
|
||||
| 1.1 | Add OpenTelemetry C++ SDK to Conan/CMake | 2d | Low |
|
||||
| 1.2 | Implement `Telemetry` interface and factory | 2d | Low |
|
||||
| 1.3 | Implement `SpanGuard` RAII wrapper | 1d | Low |
|
||||
| 1.4 | Implement configuration parser | 1d | Low |
|
||||
| 1.5 | Integrate into `ApplicationImp` | 1d | Medium |
|
||||
| 1.6 | Add conditional compilation (`XRPL_ENABLE_TELEMETRY`) | 1d | Low |
|
||||
| 1.7 | Create `NullTelemetry` no-op implementation | 0.5d | Low |
|
||||
| 1.8 | Unit tests for core infrastructure | 1.5d | Low |
|
||||
|
||||
**Total Effort**: 10 days (2 developers)
|
||||
|
||||
### Exit Criteria
|
||||
|
||||
- [ ] OpenTelemetry SDK compiles and links
|
||||
- [ ] Telemetry can be enabled/disabled via config
|
||||
- [ ] Basic span creation works
|
||||
- [ ] No performance regression when disabled
|
||||
- [ ] Unit tests passing

---

## 6.3 Phase 2: RPC Tracing (Weeks 3-4)

**Objective**: Complete tracing for all RPC operations

### Tasks

| Task | Description | Effort | Risk |
| ---- | -------------------------------------------------- | ------ | ------ |
| 2.1 | Implement W3C Trace Context HTTP header extraction | 1d | Low |
| 2.2 | Instrument `ServerHandler::onRequest()` | 1d | Low |
| 2.3 | Instrument `RPCHandler::doCommand()` | 2d | Medium |
| 2.4 | Add RPC-specific attributes | 1d | Low |
| 2.5 | Instrument WebSocket handler | 1d | Medium |
| 2.6 | Integration tests for RPC tracing | 2d | Low |
| 2.7 | Performance benchmarks | 1d | Low |
| 2.8 | Documentation | 1d | Low |

**Total Effort**: 10 days
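
Task 2.1 is small but format-sensitive. In production this work belongs to the SDK's W3C propagator rather than hand-rolled parsing; the sketch below exists only to pin down the version-00 header layout (`00-<32 hex trace-id>-<16 hex span-id>-<2 hex flags>`) that the integration tests in task 2.6 need to exercise.

```cpp
// Illustrative parser for a version-00 W3C traceparent header, e.g.
//   traceparent: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
// A real implementation should also reject all-zero trace/span ids and
// validate every hex digit, per https://www.w3.org/TR/trace-context/.
#include <optional>
#include <string>

struct TraceParent
{
    std::string traceId;  // 32 lowercase hex characters (128-bit id)
    std::string spanId;   // 16 lowercase hex characters (64-bit id)
    bool sampled;         // low bit of the trace-flags byte
};

inline std::optional<TraceParent>
parseTraceparent(std::string const& h)
{
    auto hex = [](char c) {
        return (c >= '0' && c <= '9') ? c - '0'
            : (c >= 'a' && c <= 'f') ? c - 'a' + 10
                                     : -1;
    };

    // Fixed width for version 00: "00-" + 32 + "-" + 16 + "-" + 2 == 55.
    if (h.size() != 55 || h.rfind("00-", 0) != 0 || h[35] != '-' ||
        h[52] != '-')
        return std::nullopt;

    int const hi = hex(h[53]);
    int const lo = hex(h[54]);
    if (hi < 0 || lo < 0)
        return std::nullopt;

    return TraceParent{h.substr(3, 32), h.substr(36, 16), (lo & 0x1) != 0};
}
```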

### Exit Criteria

- [ ] All RPC commands traced
- [ ] Trace context propagates from HTTP headers
- [ ] WebSocket and HTTP both instrumented
- [ ] <1ms overhead per RPC call
- [ ] Integration tests passing

---

## 6.4 Phase 3: Transaction Tracing (Weeks 5-6)

**Objective**: Trace transaction lifecycle across network

### Tasks

| Task | Description | Effort | Risk |
| ---- | --------------------------------------------- | ------ | ------ |
| 3.1 | Define `TraceContext` Protocol Buffer message | 1d | Low |
| 3.2 | Implement protobuf context serialization | 1d | Low |
| 3.3 | Instrument `PeerImp::handleTransaction()` | 2d | Medium |
| 3.4 | Instrument `NetworkOPs::submitTransaction()` | 1d | Medium |
| 3.5 | Instrument HashRouter integration | 1d | Medium |
| 3.6 | Implement relay context propagation | 2d | High |
| 3.7 | Integration tests (multi-node) | 2d | Medium |
| 3.8 | Performance benchmarks | 1d | Low |

**Total Effort**: 11 days
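
Task 3.6 carries the highest risk in this phase, so the intent is worth stating precisely: on relay, copy the current span's identifiers into the outgoing message so the receiving peer can start a child span. The sketch below uses a plain struct as a stand-in for the `TraceContext` Protocol Buffer message that task 3.1 defines; the field names and layout are assumptions, not the actual protocol change.

```cpp
// Sketch of extracting the current span context for relay (tasks 3.2/3.6).
// TraceContextFields stands in for the generated protobuf sub-message so the
// example stays self-contained.
#include <opentelemetry/trace/span.h>

#include <cstdint>
#include <string>

struct TraceContextFields
{
    std::string trace_id;     // 32 hex chars
    std::string span_id;      // 16 hex chars
    std::uint32_t flags = 0;  // W3C trace flags (bit 0 = sampled)
};

TraceContextFields
extractForRelay(opentelemetry::trace::Span const& span)
{
    TraceContextFields out;
    auto const ctx = span.GetContext();
    if (!ctx.IsValid())
        return out;  // nothing to propagate (e.g., telemetry disabled)

    char traceId[32];
    ctx.trace_id().ToLowerBase16(traceId);  // 128-bit id -> 32 hex chars
    char spanId[16];
    ctx.span_id().ToLowerBase16(spanId);  // 64-bit id -> 16 hex chars

    out.trace_id.assign(traceId, 32);
    out.span_id.assign(spanId, 16);
    out.flags = ctx.trace_flags().flags();
    return out;
}
```

The receiving peer performs the inverse: rebuild a `SpanContext` from these fields and pass it as the parent when starting its `tx.receive` span.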

### Exit Criteria

- [ ] Transaction traces span across nodes
- [ ] Trace context in Protocol Buffer messages
- [ ] HashRouter deduplication visible in traces
- [ ] Multi-node integration tests passing
- [ ] <5% overhead on transaction throughput

---

## 6.5 Phase 4: Consensus Tracing (Weeks 7-8)

**Objective**: Full observability into consensus rounds

### Tasks

| Task | Description | Effort | Risk |
| ---- | ---------------------------------------------- | ------ | ------ |
| 4.1 | Instrument `RCLConsensusAdaptor::startRound()` | 1d | Medium |
| 4.2 | Instrument phase transitions | 2d | Medium |
| 4.3 | Instrument proposal handling | 2d | High |
| 4.4 | Instrument validation handling | 1d | Medium |
| 4.5 | Add consensus-specific attributes | 1d | Low |
| 4.6 | Correlate with transaction traces | 1d | Medium |
| 4.7 | Multi-validator integration tests | 2d | High |
| 4.8 | Performance validation | 1d | Medium |

**Total Effort**: 11 days
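
Task 4.1 must add a root span without perturbing round timing, which argues for the same cheap span-creation pattern used elsewhere. A hedged sketch follows: the function stands in for `RCLConsensusAdaptor::startRound()` (the real signature differs), and the `xrpl.consensus.*` attribute keys follow the plan's naming convention.

```cpp
// Sketch of a consensus round root span (task 4.1). A real integration would
// keep the span alive on the adaptor for the whole round and End() it when
// the round completes; it is ended here only to keep the sketch complete.
#include <opentelemetry/trace/provider.h>
#include <opentelemetry/trace/scope.h>

#include <cstdint>

void
startRoundSketch(std::uint32_t ledgerSeq, std::int64_t proposers)
{
    auto tracer =
        opentelemetry::trace::Provider::GetTracerProvider()->GetTracer(
            "rippled");

    auto span = tracer->StartSpan("consensus.round");
    opentelemetry::trace::Scope scope{span};
    span->SetAttribute(
        "xrpl.consensus.ledger.seq", static_cast<std::int64_t>(ledgerSeq));
    span->SetAttribute("xrpl.consensus.proposers", proposers);

    // ... unchanged round setup runs here; phase changes (task 4.2) would
    // start child spans named consensus.phase.* under this root ...

    span->End();
}
```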

### Exit Criteria

- [ ] Complete consensus round traces
- [ ] Phase transitions visible
- [ ] Proposals and validations traced
- [ ] No impact on consensus timing
- [ ] Multi-validator test network validated

---

## 6.6 Phase 5: Documentation & Deployment (Week 9)

**Objective**: Production readiness

### Tasks

| Task | Description | Effort | Risk |
| ---- | ----------------------------- | ------ | ---- |
| 5.1 | Operator runbook | 1d | Low |
| 5.2 | Grafana dashboards | 1d | Low |
| 5.3 | Alert definitions | 0.5d | Low |
| 5.4 | Collector deployment examples | 0.5d | Low |
| 5.5 | Developer documentation | 1d | Low |
| 5.6 | Training materials | 0.5d | Low |
| 5.7 | Final integration testing | 0.5d | Low |

**Total Effort**: 5 days

---

## 6.7 Risk Assessment

```mermaid
quadrantChart
title Risk Assessment Matrix
x-axis Low Impact --> High Impact
y-axis Low Likelihood --> High Likelihood
quadrant-1 Monitor Closely
quadrant-2 Mitigate Immediately
quadrant-3 Accept Risk
quadrant-4 Plan Mitigation

SDK Compatibility: [0.25, 0.2]
Protocol Changes: [0.75, 0.65]
Performance Overhead: [0.65, 0.45]
Context Propagation: [0.5, 0.5]
Memory Leaks: [0.8, 0.2]
```

### Risk Details

| Risk | Likelihood | Impact | Mitigation |
| ------------------------------------ | ---------- | ------ | --------------------------------------- |
| Protocol changes break compatibility | Medium | High | Use high field numbers, optional fields |
| Performance overhead unacceptable | Medium | Medium | Sampling, conditional compilation |
| Context propagation complexity | Medium | Medium | Phased rollout, extensive testing |
| SDK compatibility issues | Low | Medium | Pin SDK version, fallback to no-op |
| Memory leaks in long-running nodes | Low | High | Memory profiling, bounded queues |

---

## 6.8 Success Metrics

| Metric | Target | Measurement |
| ------------------------ | ------------------------------ | --------------------- |
| Trace coverage | >95% of transactions | Sampling verification |
| CPU overhead | <3% | Benchmark tests |
| Memory overhead | <5 MB | Memory profiling |
| Latency impact (p99) | <2% | Performance tests |
| Trace completeness | >99% spans with required attrs | Validation script |
| Cross-node trace linkage | >90% of multi-hop transactions | Integration tests |

---

## 6.9 Effort Summary

<div align="center">

```mermaid
%%{init: {'pie': {'textPosition': 0.75}}}%%
pie showData
"Phase 1: Core Infrastructure" : 10
"Phase 2: RPC Tracing" : 10
"Phase 3: Transaction Tracing" : 11
"Phase 4: Consensus Tracing" : 11
"Phase 5: Documentation" : 5
```

**Total Effort Distribution (47 developer-days)**

</div>

### Resource Requirements

| Phase | Developers | Duration | Total Effort |
| --------- | ---------- | ----------- | ------------ |
| 1 | 2 | 2 weeks | 10 days |
| 2 | 1-2 | 2 weeks | 10 days |
| 3 | 2 | 2 weeks | 11 days |
| 4 | 2 | 2 weeks | 11 days |
| 5 | 1 | 1 week | 5 days |
| **Total** | **2** | **9 weeks** | **47 days** |

---

## 6.10 Quick Wins and Crawl-Walk-Run Strategy

This section outlines a prioritized approach to maximize ROI with minimal initial investment.

### 6.10.1 Crawl-Walk-Run Overview

<div align="center">

```mermaid
flowchart TB
subgraph crawl["🐢 CRAWL (Week 1-2)"]
direction LR
c1[Core SDK Setup] ~~~ c2[RPC Tracing Only] ~~~ c3[Single Node]
end

subgraph walk["🚶 WALK (Week 3-5)"]
direction LR
w1[Transaction Tracing] ~~~ w2[Cross-Node Context] ~~~ w3[Basic Dashboards]
end

subgraph run["🏃 RUN (Week 6-9)"]
direction LR
r1[Consensus Tracing] ~~~ r2[Full Correlation] ~~~ r3[Production Deploy]
end

crawl --> walk --> run

style crawl fill:#1b5e20,stroke:#0d3d14,color:#fff
style walk fill:#bf360c,stroke:#8c2809,color:#fff
style run fill:#0d47a1,stroke:#082f6a,color:#fff
style c1 fill:#1b5e20,stroke:#0d3d14,color:#fff
style c2 fill:#1b5e20,stroke:#0d3d14,color:#fff
style c3 fill:#1b5e20,stroke:#0d3d14,color:#fff
style w1 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
style w2 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
style w3 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
style r1 fill:#0d47a1,stroke:#082f6a,color:#fff
style r2 fill:#0d47a1,stroke:#082f6a,color:#fff
style r3 fill:#0d47a1,stroke:#082f6a,color:#fff
```

</div>

### 6.10.2 Quick Wins (Immediate Value)

| Quick Win | Effort | Value | When to Deploy |
| ------------------------------ | -------- | ------ | -------------- |
| **RPC Command Tracing** | 2 days | High | Week 2 |
| **RPC Latency Histograms** | 0.5 days | High | Week 2 |
| **Error Rate Dashboard** | 0.5 days | Medium | Week 2 |
| **Transaction Submit Tracing** | 1 day | High | Week 3 |
| **Consensus Round Duration** | 1 day | Medium | Week 6 |

### 6.10.3 CRAWL Phase (Weeks 1-2)

**Goal**: Get basic tracing working with minimal code changes.

**What You Get**:
- RPC request/response traces for all commands
- Latency breakdown per RPC command
- Error visibility with stack traces
- Basic Grafana dashboard

**Code Changes**: ~15 lines in `ServerHandler.cpp`, ~40 lines in new telemetry module
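
To make the "~15 lines" concrete, the entry-point change looks roughly like the following. This is a sketch against the public OpenTelemetry C++ API; the real `ServerHandler::onRequest()` signature and dispatch code differ, and `onRequestSketch` is a placeholder name.

```cpp
// Sketch of the CRAWL-phase instrumentation at the RPC entry point. The
// comment marks where the existing, unchanged dispatch logic would run.
#include <opentelemetry/trace/provider.h>
#include <opentelemetry/trace/scope.h>

#include <string>

void
onRequestSketch(std::string const& command)
{
    auto tracer =
        opentelemetry::trace::Provider::GetTracerProvider()->GetTracer(
            "rippled");

    auto span = tracer->StartSpan("rpc.command." + command);
    opentelemetry::trace::Scope scope{span};  // span is current during dispatch
    span->SetAttribute("xrpl.rpc.command", command);

    // ... existing request dispatch runs here, unchanged ...

    span->End();
}
```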

**Why Start Here**:
- RPC is the lowest-risk, highest-visibility component
- Immediate value for debugging client issues
- No cross-node complexity
- Single file modification to existing code

### 6.10.4 WALK Phase (Weeks 3-5)

**Goal**: Add transaction lifecycle tracing across nodes.

**What You Get**:
- End-to-end transaction traces from submit to relay
- Cross-node correlation (see transaction path)
- HashRouter deduplication visibility
- Relay latency metrics

**Code Changes**: ~120 lines across 4 files, plus protobuf extension

**Why Do This Second**:
- Builds on RPC tracing (transactions submitted via RPC)
- Moderate complexity (requires context propagation)
- High value for debugging transaction issues

### 6.10.5 RUN Phase (Weeks 6-9)

**Goal**: Full observability including consensus.

**What You Get**:
- Complete consensus round visibility
- Phase transition timing
- Validator proposal tracking
- Full end-to-end traces (client → RPC → TX → consensus → ledger)

**Code Changes**: ~100 lines across 3 consensus files

**Why Do This Last**:
- Highest complexity (consensus is critical path)
- Requires thorough testing
- Lower relative value (consensus issues are rarer)

### 6.10.6 ROI Prioritization Matrix

```mermaid
quadrantChart
title Implementation ROI Matrix
x-axis Low Effort --> High Effort
y-axis Low Value --> High Value
quadrant-1 Quick Wins - Do First
quadrant-2 Major Projects - Plan Carefully
quadrant-3 Nice to Have - Optional
quadrant-4 Time Sinks - Avoid

RPC Tracing: [0.15, 0.9]
TX Submit Trace: [0.25, 0.85]
TX Relay Trace: [0.5, 0.8]
Consensus Trace: [0.7, 0.75]
Peer Message Trace: [0.85, 0.3]
Ledger Acquire: [0.55, 0.5]
```

---

## 6.11 Definition of Done

Clear, measurable criteria for each phase.

### 6.11.1 Phase 1: Core Infrastructure

| Criterion | Measurement | Target |
| --------------- | ---------------------------------------------------------- | ---------------------------- |
| SDK Integration | `cmake --build` succeeds with `-DXRPL_ENABLE_TELEMETRY=ON` | ✅ Compiles |
| Runtime Toggle | `enabled=0` produces zero overhead | <0.1% CPU difference |
| Span Creation | Unit test creates and exports span | Span appears in Jaeger |
| Configuration | All config options parsed correctly | Config validation tests pass |
| Documentation | Developer guide exists | PR approved |

**Definition of Done**: All criteria met, PR merged, no regressions in CI.
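
The Runtime Toggle row is worth making concrete. When `enabled=0`, the factory from task 1.2 can return the `NullTelemetry` implementation from task 1.7, so instrumented call sites remain in place but reduce to cheap stubs; the interface shape below is assumed for illustration, not taken from [04-code-samples.md](./04-code-samples.md).

```cpp
// Illustrative no-op backend (task 1.7) behind an assumed Telemetry
// interface. With enabled=0 the factory hands back NullTelemetry; for truly
// zero cost the XRPL_ENABLE_TELEMETRY macros can elide calls at compile time.
#include <memory>
#include <string_view>

struct SpanHandle
{
    virtual ~SpanHandle() = default;
    virtual void
    setAttribute(std::string_view, std::string_view)
    {
    }
};

struct Telemetry
{
    virtual ~Telemetry() = default;
    virtual std::unique_ptr<SpanHandle>
    startSpan(std::string_view name) = 0;
};

struct NullTelemetry final : Telemetry
{
    std::unique_ptr<SpanHandle>
    startSpan(std::string_view) override
    {
        return std::make_unique<SpanHandle>();  // records nothing, exports nothing
    }
};
```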

### 6.11.2 Phase 2: RPC Tracing

| Criterion | Measurement | Target |
| ------------------ | ---------------------------------- | -------------------------- |
| Coverage | All RPC commands instrumented | 100% of commands |
| Context Extraction | traceparent header propagates | Integration test passes |
| Attributes | Command, status, duration recorded | Validation script confirms |
| Performance | RPC latency overhead | <1ms p99 |
| Dashboard | Grafana dashboard deployed | Screenshot in docs |

**Definition of Done**: RPC traces visible in Jaeger/Tempo for all commands, dashboard shows latency distribution.

### 6.11.3 Phase 3: Transaction Tracing

| Criterion | Measurement | Target |
| ---------------- | ------------------------------- | ---------------------------------- |
| Local Trace | Submit → validate → TxQ traced | Single-node test passes |
| Cross-Node | Context propagates via protobuf | Multi-node test passes |
| Relay Visibility | relay_count attribute correct | Spot check 100 txs |
| HashRouter | Deduplication visible in trace | Duplicate txs show suppressed=true |
| Performance | TX throughput overhead | <5% degradation |

**Definition of Done**: Transaction traces span 3+ nodes in test network, performance within bounds.

### 6.11.4 Phase 4: Consensus Tracing

| Criterion | Measurement | Target |
| -------------------- | ----------------------------- | ------------------------- |
| Round Tracing | startRound creates root span | Unit test passes |
| Phase Visibility | All phases have child spans | Integration test confirms |
| Proposer Attribution | Proposer ID in attributes | Spot check 50 rounds |
| Timing Accuracy | Phase durations match PerfLog | <5% variance |
| No Consensus Impact | Round timing unchanged | Performance test passes |

**Definition of Done**: Consensus rounds fully traceable, no impact on consensus timing.

### 6.11.5 Phase 5: Production Deployment

| Criterion | Measurement | Target |
| ------------ | ---------------------------- | -------------------------- |
| Collector HA | Multiple collectors deployed | No single point of failure |
| Sampling | Tail sampling configured | 10% base + errors + slow |
| Retention | Data retained per policy | 7 days hot, 30 days warm |
| Alerting | Alerts configured | Error spike, high latency |
| Runbook | Operator documentation | Approved by ops team |
| Training | Team trained | Session completed |

**Definition of Done**: Telemetry running in production, operators trained, alerts active.

### 6.11.6 Success Metrics Summary

| Phase | Primary Metric | Secondary Metric | Deadline |
| ------- | ---------------------- | --------------------------- | ------------- |
| Phase 1 | SDK compiles and runs | Zero overhead when disabled | End of Week 2 |
| Phase 2 | 100% RPC coverage | <1ms latency overhead | End of Week 4 |
| Phase 3 | Cross-node traces work | <5% throughput impact | End of Week 6 |
| Phase 4 | Consensus fully traced | No consensus timing impact | End of Week 8 |
| Phase 5 | Production deployment | Operators trained | End of Week 9 |

---

## 6.12 Recommended Implementation Order

Based on ROI analysis, implement in this exact order:

```mermaid
flowchart TB
subgraph week1["Week 1"]
t1[1. OpenTelemetry SDK<br/>Conan/CMake integration]
t2[2. Telemetry interface<br/>SpanGuard, config]
end

subgraph week2["Week 2"]
t3[3. RPC ServerHandler<br/>instrumentation]
t4[4. Basic Jaeger setup<br/>for testing]
end

subgraph week3["Week 3"]
t5[5. Transaction submit<br/>tracing]
t6[6. Grafana dashboard<br/>v1]
end

subgraph week4["Week 4"]
t7[7. Protobuf context<br/>extension]
t8[8. PeerImp tx.relay<br/>instrumentation]
end

subgraph week5["Week 5"]
t9[9. Multi-node<br/>integration tests]
t10[10. Performance<br/>benchmarks]
end

subgraph week6_8["Weeks 6-8"]
t11[11. Consensus<br/>instrumentation]
t12[12. Full integration<br/>testing]
end

subgraph week9["Week 9"]
t13[13. Production<br/>deployment]
t14[14. Documentation<br/>& training]
end

t1 --> t2 --> t3 --> t4
t4 --> t5 --> t6
t6 --> t7 --> t8
t8 --> t9 --> t10
t10 --> t11 --> t12
t12 --> t13 --> t14

style week1 fill:#1b5e20,stroke:#0d3d14,color:#fff
style week2 fill:#1b5e20,stroke:#0d3d14,color:#fff
style week3 fill:#bf360c,stroke:#8c2809,color:#fff
style week4 fill:#bf360c,stroke:#8c2809,color:#fff
style week5 fill:#bf360c,stroke:#8c2809,color:#fff
style week6_8 fill:#0d47a1,stroke:#082f6a,color:#fff
style week9 fill:#4a148c,stroke:#2e0d57,color:#fff
style t1 fill:#1b5e20,stroke:#0d3d14,color:#fff
style t2 fill:#1b5e20,stroke:#0d3d14,color:#fff
style t3 fill:#1b5e20,stroke:#0d3d14,color:#fff
style t4 fill:#1b5e20,stroke:#0d3d14,color:#fff
style t5 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
style t6 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
style t7 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
style t8 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
style t9 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
style t10 fill:#ffe0b2,stroke:#ffcc80,color:#1e293b
style t11 fill:#0d47a1,stroke:#082f6a,color:#fff
style t12 fill:#0d47a1,stroke:#082f6a,color:#fff
style t13 fill:#4a148c,stroke:#2e0d57,color:#fff
style t14 fill:#4a148c,stroke:#2e0d57,color:#fff
```

---

*Previous: [Configuration Reference](./05-configuration-reference.md)* | *Next: [Observability Backends](./07-observability-backends.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
OpenTelemetryPlan/07-observability-backends.md (Normal file, 590 lines)
@@ -0,0 +1,590 @@
# Observability Backend Recommendations

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Implementation Phases](./06-implementation-phases.md) | [Appendix](./08-appendix.md)

---

## 7.1 Development/Testing Backends

| Backend | Pros | Cons | Use Case |
| ---------- | ------------------- | ----------------- | ----------------- |
| **Jaeger** | Easy setup, good UI | Limited retention | Local dev, CI |
| **Zipkin** | Simple, lightweight | Basic features | Quick prototyping |

### Quick Start with Jaeger

```bash
# Start Jaeger with OTLP support
docker run -d --name jaeger \
  -e COLLECTOR_OTLP_ENABLED=true \
  -p 16686:16686 \
  -p 4317:4317 \
  -p 4318:4318 \
  jaegertracing/all-in-one:latest
```

---

## 7.2 Production Backends

| Backend | Pros | Cons | Use Case |
| ----------------- | ----------------------------------------- | ------------------ | --------------------------- |
| **Grafana Tempo** | Cost-effective, Grafana integration | Newer project | Most production deployments |
| **Elastic APM** | Full observability stack, log correlation | Resource intensive | Existing Elastic users |
| **Honeycomb** | Excellent query, high cardinality | SaaS cost | Deep debugging needs |
| **Datadog APM** | Full platform, easy setup | SaaS cost | Enterprise with budget |

### Backend Selection Flowchart

```mermaid
flowchart TD
start[Select Backend] --> budget{Budget<br/>Constraints?}

budget -->|Yes| oss[Open Source]
budget -->|No| saas{Prefer<br/>SaaS?}

oss --> existing{Existing<br/>Stack?}
existing -->|Grafana| tempo[Grafana Tempo]
existing -->|Elastic| elastic[Elastic APM]
existing -->|None| tempo

saas -->|Yes| enterprise{Enterprise<br/>Support?}
saas -->|No| oss

enterprise -->|Yes| datadog[Datadog APM]
enterprise -->|No| honeycomb[Honeycomb]

tempo --> final[Configure Collector]
elastic --> final
honeycomb --> final
datadog --> final

style start fill:#0f172a,stroke:#020617,color:#fff
style budget fill:#334155,stroke:#1e293b,color:#fff
style oss fill:#1e293b,stroke:#0f172a,color:#fff
style existing fill:#334155,stroke:#1e293b,color:#fff
style saas fill:#334155,stroke:#1e293b,color:#fff
style enterprise fill:#334155,stroke:#1e293b,color:#fff
style final fill:#0f172a,stroke:#020617,color:#fff
style tempo fill:#1b5e20,stroke:#0d3d14,color:#fff
style elastic fill:#bf360c,stroke:#8c2809,color:#fff
style honeycomb fill:#0d47a1,stroke:#082f6a,color:#fff
style datadog fill:#4a148c,stroke:#2e0d57,color:#fff
```

---

## 7.3 Recommended Production Architecture

```mermaid
flowchart TB
subgraph validators["Validator Nodes"]
v1[rippled<br/>Validator 1]
v2[rippled<br/>Validator 2]
end

subgraph stock["Stock Nodes"]
s1[rippled<br/>Stock 1]
s2[rippled<br/>Stock 2]
end

subgraph collector["OTel Collector Cluster"]
c1[Collector<br/>DC1]
c2[Collector<br/>DC2]
end

subgraph backends["Storage Backends"]
tempo[(Grafana<br/>Tempo)]
elastic[(Elastic<br/>APM)]
archive[(S3/GCS<br/>Archive)]
end

subgraph ui["Visualization"]
grafana[Grafana<br/>Dashboards]
end

v1 -->|OTLP| c1
v2 -->|OTLP| c1
s1 -->|OTLP| c2
s2 -->|OTLP| c2

c1 --> tempo
c1 --> elastic
c2 --> tempo
c2 --> archive

tempo --> grafana
elastic --> grafana

style validators fill:#b71c1c,stroke:#7f1d1d,color:#ffffff
style stock fill:#0d47a1,stroke:#082f6a,color:#ffffff
style collector fill:#bf360c,stroke:#8c2809,color:#ffffff
style backends fill:#1b5e20,stroke:#0d3d14,color:#ffffff
style ui fill:#4a148c,stroke:#2e0d57,color:#ffffff
```

---

## 7.4 Architecture Considerations

### 7.4.1 Collector Placement

| Strategy | Description | Pros | Cons |
| ------------- | -------------------- | ------------------------ | ----------------------- |
| **Sidecar** | Collector per node | Isolation, simple config | Resource overhead |
| **DaemonSet** | Collector per host | Shared resources | Complexity |
| **Gateway** | Central collector(s) | Centralized processing | Single point of failure |

**Recommendation**: Use **Gateway** pattern with regional collectors for rippled networks:
- One collector cluster per datacenter/region
- Tail-based sampling at collector level
- Multiple export destinations for redundancy

### 7.4.2 Sampling Strategy

```mermaid
flowchart LR
subgraph head["Head Sampling (Node)"]
hs[10% probabilistic]
end

subgraph tail["Tail Sampling (Collector)"]
ts1[Keep all errors]
ts2[Keep slow >5s]
ts3[Keep 10% rest]
end

head --> tail

ts1 --> final[Final Traces]
ts2 --> final
ts3 --> final

style head fill:#0d47a1,stroke:#082f6a,color:#fff
style tail fill:#1b5e20,stroke:#0d3d14,color:#fff
style hs fill:#0d47a1,stroke:#082f6a,color:#fff
style ts1 fill:#1b5e20,stroke:#0d3d14,color:#fff
style ts2 fill:#1b5e20,stroke:#0d3d14,color:#fff
style ts3 fill:#1b5e20,stroke:#0d3d14,color:#fff
style final fill:#bf360c,stroke:#8c2809,color:#fff
```

### 7.4.3 Data Retention

| Environment | Hot Storage | Warm Storage | Cold Archive |
| ----------- | ----------- | ------------ | ------------ |
| Development | 24 hours | N/A | N/A |
| Staging | 7 days | N/A | N/A |
| Production | 7 days | 30 days | multi-year |

---

## 7.5 Integration Checklist

- [ ] Choose primary backend (Tempo recommended for cost/features)
- [ ] Deploy collector cluster with high availability
- [ ] Configure tail-based sampling for error/latency traces
- [ ] Set up Grafana dashboards for trace visualization
- [ ] Configure alerts for trace anomalies
- [ ] Establish data retention policies
- [ ] Test trace correlation with logs and metrics

---

## 7.6 Grafana Dashboard Examples

Pre-built dashboards for rippled observability.

### 7.6.1 Consensus Health Dashboard

```json
{
  "title": "rippled Consensus Health",
  "uid": "rippled-consensus-health",
  "tags": ["rippled", "consensus", "tracing"],
  "panels": [
    {
      "title": "Consensus Round Duration",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"consensus.round\"} | avg(duration) by (resource.service.instance.id)"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "unit": "ms",
          "thresholds": {
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 4000 },
              { "color": "red", "value": 5000 }
            ]
          }
        }
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 }
    },
    {
      "title": "Phase Duration Breakdown",
      "type": "barchart",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=~\"consensus.phase.*\"} | avg(duration) by (name)"
        }
      ],
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 }
    },
    {
      "title": "Proposers per Round",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"consensus.round\"} | avg(span.xrpl.consensus.proposers)"
        }
      ],
      "gridPos": { "h": 4, "w": 6, "x": 0, "y": 8 }
    },
    {
      "title": "Recent Slow Rounds (>5s)",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"consensus.round\"} | duration > 5s"
        }
      ],
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 12 }
    }
  ]
}
```

### 7.6.2 Node Overview Dashboard

```json
{
  "title": "rippled Node Overview",
  "uid": "rippled-node-overview",
  "panels": [
    {
      "title": "Active Nodes",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\"} | count_over_time() by (resource.service.instance.id) | count()"
        }
      ],
      "gridPos": { "h": 4, "w": 4, "x": 0, "y": 0 }
    },
    {
      "title": "Total Transactions (1h)",
      "type": "stat",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.receive\"} | count()"
        }
      ],
      "gridPos": { "h": 4, "w": 4, "x": 4, "y": 0 }
    },
    {
      "title": "Error Rate",
      "type": "gauge",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && status.code=error} | rate() / {resource.service.name=\"rippled\"} | rate() * 100"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "unit": "percent",
          "max": 10,
          "thresholds": {
            "steps": [
              { "color": "green", "value": null },
              { "color": "yellow", "value": 1 },
              { "color": "red", "value": 5 }
            ]
          }
        }
      },
      "gridPos": { "h": 4, "w": 4, "x": 8, "y": 0 }
    },
    {
      "title": "Service Map",
      "type": "nodeGraph",
      "datasource": "Tempo",
      "gridPos": { "h": 12, "w": 12, "x": 12, "y": 0 }
    }
  ]
}
```

### 7.6.3 Alert Rules

```yaml
# grafana/provisioning/alerting/rippled-alerts.yaml
apiVersion: 1

groups:
  - name: rippled-tracing-alerts
    folder: rippled
    interval: 1m
    rules:
      - uid: consensus-slow
        title: Consensus Round Slow
        condition: A
        data:
          - refId: A
            datasourceUid: tempo
            model:
              queryType: traceql
              query: '{resource.service.name="rippled" && name="consensus.round"} | avg(duration) > 5s'
        for: 5m
        annotations:
          summary: Consensus rounds taking >5 seconds
          description: "Consensus duration: {{ $value }}ms"
        labels:
          severity: warning

      - uid: rpc-error-spike
        title: RPC Error Rate Spike
        condition: B
        data:
          - refId: B
            datasourceUid: tempo
            model:
              queryType: traceql
              query: '{resource.service.name="rippled" && name=~"rpc.command.*" && status.code=error} | rate() > 0.05'
        for: 2m
        annotations:
          summary: RPC error rate >5%
        labels:
          severity: critical

      - uid: tx-throughput-drop
        title: Transaction Throughput Drop
        condition: C
        data:
          - refId: C
            datasourceUid: tempo
            model:
              queryType: traceql
              query: '{resource.service.name="rippled" && name="tx.receive"} | rate() < 10'
        for: 10m
        annotations:
          summary: Transaction throughput below threshold
        labels:
          severity: warning
```

---

## 7.7 PerfLog and Insight Correlation

How to correlate OpenTelemetry traces with existing rippled observability.

### 7.7.1 Correlation Architecture

```mermaid
flowchart TB
subgraph rippled["rippled Node"]
otel[OpenTelemetry<br/>Spans]
perflog[PerfLog<br/>JSON Logs]
insight[Beast Insight<br/>StatsD Metrics]
end

subgraph collectors["Data Collection"]
otelc[OTel Collector]
promtail[Promtail/Fluentd]
statsd[StatsD Exporter]
end

subgraph storage["Storage"]
tempo[(Tempo)]
loki[(Loki)]
prom[(Prometheus)]
end

subgraph grafana["Grafana"]
traces[Trace View]
logs[Log View]
metrics[Metrics View]
corr[Correlation<br/>Panel]
end

otel -->|OTLP| otelc --> tempo
perflog -->|JSON| promtail --> loki
insight -->|StatsD| statsd --> prom

tempo --> traces
loki --> logs
prom --> metrics

traces --> corr
logs --> corr
metrics --> corr

style rippled fill:#0d47a1,stroke:#082f6a,color:#fff
style collectors fill:#bf360c,stroke:#8c2809,color:#fff
style storage fill:#1b5e20,stroke:#0d3d14,color:#fff
style grafana fill:#4a148c,stroke:#2e0d57,color:#fff
style otel fill:#0d47a1,stroke:#082f6a,color:#fff
style perflog fill:#0d47a1,stroke:#082f6a,color:#fff
style insight fill:#0d47a1,stroke:#082f6a,color:#fff
style otelc fill:#bf360c,stroke:#8c2809,color:#fff
style promtail fill:#bf360c,stroke:#8c2809,color:#fff
style statsd fill:#bf360c,stroke:#8c2809,color:#fff
style tempo fill:#1b5e20,stroke:#0d3d14,color:#fff
style loki fill:#1b5e20,stroke:#0d3d14,color:#fff
style prom fill:#1b5e20,stroke:#0d3d14,color:#fff
style traces fill:#4a148c,stroke:#2e0d57,color:#fff
style logs fill:#4a148c,stroke:#2e0d57,color:#fff
style metrics fill:#4a148c,stroke:#2e0d57,color:#fff
style corr fill:#4a148c,stroke:#2e0d57,color:#fff
```

### 7.7.2 Correlation Fields

| Source | Field | Link To | Purpose |
| ----------- | --------------------------- | ------------- | -------------------------- |
| **Trace** | `trace_id` | Logs | Find log entries for trace |
| **Trace** | `xrpl.tx.hash` | Logs, Metrics | Find TX-related data |
| **Trace** | `xrpl.consensus.ledger.seq` | Logs | Find ledger-related logs |
| **PerfLog** | `trace_id` (new) | Traces | Jump to trace from log |
| **PerfLog** | `ledger_seq` | Traces | Find consensus trace |
| **Insight** | `exemplar.trace_id` | Traces | Jump from metric spike |
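
The `trace_id` (new) row implies a small change on the PerfLog side: stamp the current trace id into each JSON log line so the Loki lookup in 7.7.3 below can find it. A sketch, with the JSON assembly simplified to a string append (the real change would live where PerfLog builds its JSON object):

```cpp
// Sketch: append a trace_id field to a PerfLog JSON fragment whenever a span
// is active on the current thread.
#include <opentelemetry/context/runtime_context.h>
#include <opentelemetry/trace/context.h>

#include <string>

std::string
withTraceId(std::string jsonFields)
{
    auto span = opentelemetry::trace::GetSpan(
        opentelemetry::context::RuntimeContext::GetCurrent());
    auto const ctx = span->GetContext();
    if (ctx.IsValid())  // false when telemetry is off or no span is current
    {
        char buf[32];
        ctx.trace_id().ToLowerBase16(buf);
        jsonFields += ",\"trace_id\":\"" + std::string(buf, 32) + "\"";
    }
    return jsonFields;
}
```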

### 7.7.3 Example: Debugging a Slow Transaction

**Step 1: Find the trace**
```
# In Grafana Explore with Tempo
{resource.service.name="rippled" && span.xrpl.tx.hash="ABC123..."}
```

**Step 2: Get the trace_id from the trace view**
```
Trace ID: 4bf92f3577b34da6a3ce929d0e0e4736
```

**Step 3: Find related PerfLog entries**
```
# In Grafana Explore with Loki
{job="rippled"} |= "4bf92f3577b34da6a3ce929d0e0e4736"
```

**Step 4: Check Insight metrics for the time window**
```
# In Grafana with Prometheus
rate(rippled_tx_applied_total[1m])
@ timestamp_from_trace
```

### 7.7.4 Unified Dashboard Example

```json
{
  "title": "rippled Unified Observability",
  "uid": "rippled-unified",
  "panels": [
    {
      "title": "Transaction Latency (Traces)",
      "type": "timeseries",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\" && name=\"tx.receive\"} | histogram_over_time(duration)"
        }
      ],
      "gridPos": { "h": 6, "w": 8, "x": 0, "y": 0 }
    },
    {
      "title": "Transaction Rate (Metrics)",
      "type": "timeseries",
      "datasource": "Prometheus",
      "targets": [
        {
          "expr": "rate(rippled_tx_received_total[5m])",
          "legendFormat": "{{ instance }}"
        }
      ],
      "fieldConfig": {
        "defaults": {
          "links": [
            {
              "title": "View traces",
              "url": "/explore?left={\"datasource\":\"Tempo\",\"query\":\"{resource.service.name=\\\"rippled\\\" && name=\\\"tx.receive\\\"}\"}"
            }
          ]
        }
      },
      "gridPos": { "h": 6, "w": 8, "x": 8, "y": 0 }
    },
    {
      "title": "Recent Logs",
      "type": "logs",
      "datasource": "Loki",
      "targets": [
        {
          "expr": "{job=\"rippled\"} | json"
        }
      ],
      "gridPos": { "h": 6, "w": 8, "x": 16, "y": 0 }
    },
    {
      "title": "Trace Search",
      "type": "table",
      "datasource": "Tempo",
      "targets": [
        {
          "queryType": "traceql",
          "query": "{resource.service.name=\"rippled\"}"
        }
      ],
      "fieldConfig": {
        "overrides": [
          {
            "matcher": { "id": "byName", "options": "traceID" },
            "properties": [
              {
                "id": "links",
                "value": [
                  {
                    "title": "View trace",
                    "url": "/explore?left={\"datasource\":\"Tempo\",\"query\":\"${__value.raw}\"}"
                  },
                  {
                    "title": "View logs",
                    "url": "/explore?left={\"datasource\":\"Loki\",\"query\":\"{job=\\\"rippled\\\"} |= \\\"${__value.raw}\\\"\"}"
                  }
                ]
              }
            ]
          }
        ]
      },
      "gridPos": { "h": 12, "w": 24, "x": 0, "y": 6 }
    }
  ]
}
```

---

*Previous: [Implementation Phases](./06-implementation-phases.md)* | *Next: [Appendix](./08-appendix.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
OpenTelemetryPlan/08-appendix.md (Normal file, 133 lines)
@@ -0,0 +1,133 @@
# Appendix

> **Parent Document**: [OpenTelemetryPlan.md](./OpenTelemetryPlan.md)
> **Related**: [Observability Backends](./07-observability-backends.md)

---

## 8.1 Glossary

| Term | Definition |
| --------------------- | ---------------------------------------------------------- |
| **Span** | A unit of work with start/end time, name, and attributes |
| **Trace** | A collection of spans representing a complete request flow |
| **Trace ID** | 128-bit unique identifier for a trace |
| **Span ID** | 64-bit unique identifier for a span within a trace |
| **Context** | Carrier for trace/span IDs across boundaries |
| **Propagator** | Component that injects/extracts context |
| **Sampler** | Decides which traces to record |
| **Exporter** | Sends spans to backend |
| **Collector** | Receives, processes, and forwards telemetry |
| **OTLP** | OpenTelemetry Protocol (wire format) |
| **W3C Trace Context** | Standard HTTP headers for trace propagation |
| **Baggage** | Key-value pairs propagated across service boundaries |
| **Resource** | Entity producing telemetry (service, host, etc.) |
| **Instrumentation** | Code that creates telemetry data |

### rippled-Specific Terms

| Term | Definition |
| ----------------- | -------------------------------------------------- |
| **Overlay** | P2P network layer managing peer connections |
| **Consensus** | XRP Ledger consensus algorithm (RCL) |
| **Proposal** | Validator's suggested transaction set for a ledger |
| **Validation** | Validator's signature on a closed ledger |
| **HashRouter** | Component for transaction deduplication |
| **JobQueue** | Thread pool for asynchronous task execution |
| **PerfLog** | Existing performance logging system in rippled |
| **Beast Insight** | Existing metrics framework in rippled |

---

## 8.2 Span Hierarchy Visualization

```mermaid
flowchart TB
subgraph trace["Trace: Transaction Lifecycle"]
rpc["rpc.submit<br/>(entry point)"]
validate["tx.validate"]
relay["tx.relay<br/>(parent span)"]

subgraph peers["Peer Spans"]
p1["peer.send<br/>Peer A"]
p2["peer.send<br/>Peer B"]
p3["peer.send<br/>Peer C"]
end

consensus["consensus.round"]
apply["tx.apply"]
end

rpc --> validate
validate --> relay
relay --> p1
relay --> p2
relay --> p3
p1 -.->|"context propagation"| consensus
consensus --> apply

style trace fill:#0f172a,stroke:#020617,color:#fff
style peers fill:#1e3a8a,stroke:#172554,color:#fff
style rpc fill:#1d4ed8,stroke:#1e40af,color:#fff
style validate fill:#047857,stroke:#064e3b,color:#fff
style relay fill:#047857,stroke:#064e3b,color:#fff
style p1 fill:#0e7490,stroke:#155e75,color:#fff
style p2 fill:#0e7490,stroke:#155e75,color:#fff
style p3 fill:#0e7490,stroke:#155e75,color:#fff
style consensus fill:#fef3c7,stroke:#fde68a,color:#1e293b
style apply fill:#047857,stroke:#064e3b,color:#fff
```

---

## 8.3 References

### OpenTelemetry Resources

1. [OpenTelemetry C++ SDK](https://github.com/open-telemetry/opentelemetry-cpp)
2. [OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel/)
3. [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/)
4. [OTLP Protocol Specification](https://opentelemetry.io/docs/specs/otlp/)

### Standards

5. [W3C Trace Context](https://www.w3.org/TR/trace-context/)
6. [W3C Baggage](https://www.w3.org/TR/baggage/)
7. [Protocol Buffers](https://protobuf.dev/)

### rippled Resources

8. [rippled Source Code](https://github.com/XRPLF/rippled)
9. [XRP Ledger Documentation](https://xrpl.org/docs/)
10. [rippled Overlay README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/overlay/README.md)
11. [rippled RPC README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/rpc/README.md)
12. [rippled Consensus README](https://github.com/XRPLF/rippled/blob/develop/src/xrpld/app/consensus/README.md)

---

## 8.4 Version History

| Version | Date | Author | Changes |
| ------- | ---------- | ------ | --------------------------------- |
| 1.0 | 2026-02-12 | - | Initial implementation plan |
| 1.1 | 2026-02-13 | - | Refactored into modular documents |

---

## 8.5 Document Index

| Document | Description |
| ---------------------------------------------------------------- | ------------------------------------------ |
| [OpenTelemetryPlan.md](./OpenTelemetryPlan.md) | Master overview and executive summary |
| [01-architecture-analysis.md](./01-architecture-analysis.md) | rippled architecture and trace points |
| [02-design-decisions.md](./02-design-decisions.md) | SDK selection, exporters, span conventions |
| [03-implementation-strategy.md](./03-implementation-strategy.md) | Directory structure, performance analysis |
| [04-code-samples.md](./04-code-samples.md) | C++ code examples for all components |
| [05-configuration-reference.md](./05-configuration-reference.md) | rippled config, CMake, Collector configs |
| [06-implementation-phases.md](./06-implementation-phases.md) | Timeline, tasks, risks, success metrics |
| [07-observability-backends.md](./07-observability-backends.md) | Backend selection and architecture |
| [08-appendix.md](./08-appendix.md) | Glossary, references, version history |

---

*Previous: [Observability Backends](./07-observability-backends.md)* | *Back to: [Overview](./OpenTelemetryPlan.md)*
OpenTelemetryPlan/OpenTelemetryPlan.md (Normal file, 189 lines)
@@ -0,0 +1,189 @@
# [OpenTelemetry](00-tracing-fundamentals.md) Distributed Tracing Implementation Plan for rippled (xrpld)

## Executive Summary

This document provides a comprehensive implementation plan for integrating OpenTelemetry distributed tracing into the rippled XRP Ledger node software. The plan addresses the unique challenges of a decentralized peer-to-peer system where trace context must propagate across network boundaries between independent nodes.

### Key Benefits

- **End-to-end transaction visibility**: Track transactions from submission through consensus to ledger inclusion
- **Consensus round analysis**: Understand timing and behavior of consensus phases across validators
- **RPC performance insights**: Identify slow handlers and optimize response times
- **Network topology understanding**: Visualize message propagation patterns between peers
- **Incident debugging**: Correlate events across distributed nodes during issues

### Estimated Performance Overhead

| Metric | Overhead | Notes |
| ------------- | ---------- | ----------------------------------- |
| CPU | 1-3% | Span creation and attribute setting |
| Memory | 2-5 MB | Batch buffer for pending spans |
| Network | 10-50 KB/s | Compressed OTLP export to collector |
| Latency (p99) | <2% | With proper sampling configuration |

---

## Document Structure

This implementation plan is organized into modular documents for easier navigation:

<div align="center">

```mermaid
flowchart TB
overview["📋 OpenTelemetryPlan.md<br/>(This Document)"]

subgraph analysis["Analysis & Design"]
arch["01-architecture-analysis.md"]
design["02-design-decisions.md"]
end

subgraph impl["Implementation"]
strategy["03-implementation-strategy.md"]
code["04-code-samples.md"]
config["05-configuration-reference.md"]
end

subgraph deploy["Deployment & Planning"]
phases["06-implementation-phases.md"]
backends["07-observability-backends.md"]
appendix["08-appendix.md"]
end

overview --> analysis
overview --> impl
overview --> deploy

arch --> design
design --> strategy
strategy --> code
code --> config
config --> phases
phases --> backends
backends --> appendix

style overview fill:#1b5e20,stroke:#0d3d14,color:#fff,stroke-width:2px
style analysis fill:#0d47a1,stroke:#082f6a,color:#fff
style impl fill:#bf360c,stroke:#8c2809,color:#fff
style deploy fill:#4a148c,stroke:#2e0d57,color:#fff
style arch fill:#0d47a1,stroke:#082f6a,color:#fff
style design fill:#0d47a1,stroke:#082f6a,color:#fff
style strategy fill:#bf360c,stroke:#8c2809,color:#fff
style code fill:#bf360c,stroke:#8c2809,color:#fff
style config fill:#bf360c,stroke:#8c2809,color:#fff
style phases fill:#4a148c,stroke:#2e0d57,color:#fff
style backends fill:#4a148c,stroke:#2e0d57,color:#fff
style appendix fill:#4a148c,stroke:#2e0d57,color:#fff
```

</div>

---

## Table of Contents

| Section | Document | Description |
| ------- | ---------------------------------------------------------- | ---------------------------------------------------------------------- |
| **1** | [Architecture Analysis](./01-architecture-analysis.md) | rippled component analysis, trace points, instrumentation priorities |
| **2** | [Design Decisions](./02-design-decisions.md) | SDK selection, exporters, span naming, attributes, context propagation |
| **3** | [Implementation Strategy](./03-implementation-strategy.md) | Directory structure, key principles, performance optimization |
| **4** | [Code Samples](./04-code-samples.md) | Complete C++ implementation examples for all components |
| **5** | [Configuration Reference](./05-configuration-reference.md) | rippled config, CMake integration, Collector configurations |
| **6** | [Implementation Phases](./06-implementation-phases.md) | 5-phase timeline, tasks, risks, success metrics |
| **7** | [Observability Backends](./07-observability-backends.md) | Backend selection guide and production architecture |
| **8** | [Appendix](./08-appendix.md) | Glossary, references, version history |

---

## 1. Architecture Analysis

The rippled node consists of several key components that require instrumentation for comprehensive distributed tracing. The main areas include the RPC server (HTTP/WebSocket), Overlay P2P network, Consensus mechanism (RCLConsensus), JobQueue for async task execution, and existing observability infrastructure (PerfLog, Insight/StatsD, Journal logging).

Key trace points span across transaction submission via RPC, peer-to-peer message propagation, consensus round execution, and ledger building. The implementation prioritizes high-value, low-risk components first: RPC handlers provide immediate value with minimal risk, while consensus tracing requires careful implementation to avoid timing impacts.

➡️ **[Read full Architecture Analysis](./01-architecture-analysis.md)**

---

## 2. Design Decisions

The OpenTelemetry C++ SDK is selected for its CNCF backing, active development, and native performance characteristics. Traces are exported via OTLP/gRPC (primary) or OTLP/HTTP (fallback) to an OpenTelemetry Collector, which provides flexible routing and sampling.

Span naming follows a hierarchical `<component>.<operation>` convention (e.g., `rpc.submit`, `tx.relay`, `consensus.round`). Context propagation uses W3C Trace Context headers for HTTP and embedded Protocol Buffer fields for P2P messages. The implementation coexists with existing PerfLog and Insight observability systems through correlation IDs.

**Data Collection & Privacy**: Telemetry collects only operational metadata (timing, counts, hashes), never sensitive content (private keys, balances, amounts, raw payloads). Privacy protection includes account hashing, configurable redaction, sampling, and collector-level filtering. Node operators retain full control over what data is exported, although the specific controls are not yet written up in this document.

➡️ **[Read full Design Decisions](./02-design-decisions.md)**

---

## 3. Implementation Strategy

The telemetry code is organized under `include/xrpl/telemetry/` for headers and `src/libxrpl/telemetry/` for implementation. Key principles include RAII-based span management via `SpanGuard`, conditional compilation with `XRPL_ENABLE_TELEMETRY`, and minimal runtime overhead through batch processing and efficient sampling.

Performance optimization strategies include probabilistic head sampling (10% default), tail-based sampling at the collector for errors and slow traces, batch export to reduce network overhead, and conditional instrumentation that compiles to no-ops when disabled.

➡️ **[Read full Implementation Strategy](./03-implementation-strategy.md)**

---

## 4. Code Samples

Complete C++ implementation examples are provided for all telemetry components:
- `Telemetry.h` - Core interface for tracer access and span creation
- `SpanGuard.h` - RAII wrapper for automatic span lifecycle management
- `TracingInstrumentation.h` - Macros for conditional instrumentation
- Protocol Buffer extensions for trace context propagation
- Module-specific instrumentation (RPC, Consensus, P2P, JobQueue)

➡️ **[View all Code Samples](./04-code-samples.md)**

---

## 5. Configuration Reference

Configuration is handled through the `[telemetry]` section in `xrpld.cfg` with options for enabling/disabling, exporter selection, endpoint configuration, sampling ratios, and component-level filtering. CMake integration includes a `XRPL_ENABLE_TELEMETRY` option for compile-time control.

OpenTelemetry Collector configurations are provided for development (with Jaeger) and production (with tail-based sampling, Tempo, and Elastic APM). Docker Compose examples enable quick local development environment setup.

➡️ **[View full Configuration Reference](./05-configuration-reference.md)**

---

## 6. Implementation Phases

The implementation spans 9 weeks across 5 phases:

| Phase | Duration | Focus | Key Deliverables |
| ----- | --------- | ------------------- | --------------------------------------------------- |
| 1 | Weeks 1-2 | Core Infrastructure | SDK integration, Telemetry interface, Configuration |
| 2 | Weeks 3-4 | RPC Tracing | HTTP context extraction, Handler instrumentation |
| 3 | Weeks 5-6 | Transaction Tracing | Protocol Buffer context, Relay propagation |
| 4 | Weeks 7-8 | Consensus Tracing | Round spans, Proposal/validation tracing |
| 5 | Week 9 | Documentation | Runbook, Dashboards, Training |

**Total Effort**: 47 developer-days with 2 developers

➡️ **[View full Implementation Phases](./06-implementation-phases.md)**

---

## 7. Observability Backends

For development and testing, Jaeger provides easy setup with a good UI. For production deployments, Grafana Tempo is recommended for its cost-effectiveness and Grafana integration, while Elastic APM is ideal for organizations with existing Elastic infrastructure.

The recommended production architecture uses a gateway collector pattern with regional collectors performing tail-based sampling, routing traces to multiple backends (Tempo for primary storage, Elastic for log correlation, S3/GCS for long-term archive).

➡️ **[View Observability Backend Recommendations](./07-observability-backends.md)**

---

## 8. Appendix

The appendix contains a glossary of OpenTelemetry and rippled-specific terms, references to external documentation and specifications, version history for this implementation plan, and a complete document index.

➡️ **[View Appendix](./08-appendix.md)**

---

*This document provides a comprehensive implementation plan for integrating OpenTelemetry distributed tracing into the rippled XRP Ledger node software. For detailed information on any section, follow the links to the corresponding sub-documents.*
README.md
@@ -1,51 +1,54 @@
 [](https://codecov.io/gh/XRPLF/rippled)

 # The XRP Ledger

 The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer nodes. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator.

 ## XRP

-[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
+[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free crypto-asset native to the XRP Ledger, and is designed as a gas token for network services and to bridge different currencies. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.

 ## rippled

 The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html).

-If you are interested in running an **API Server** (including a **Full History Server**) or a **Reporting Mode** server, take a look at [Clio](https://github.com/XRPLF/clio). rippled Reporting Mode is expected to be replaced by Clio.
+If you are interested in running an **API Server** (including a **Full History Server**), take a look at [Clio](https://github.com/XRPLF/clio). (rippled Reporting Mode has been replaced by Clio.)

 ### Build from Source

-* [Read the build instructions in `BUILD.md`](BUILD.md)
-* If you encounter any issues, please [open an issue](https://github.com/XRPLF/rippled/issues)
+- [Read the build instructions in `BUILD.md`](BUILD.md)
+- If you encounter any issues, please [open an issue](https://github.com/XRPLF/rippled/issues)

 ## Key Features of the XRP Ledger

 - **[Censorship-Resistant Transaction Processing][]:** No single party decides which transactions succeed or fail, and no one can "roll back" a transaction after it completes. As long as those who choose to participate in the network keep it healthy, they can settle transactions in seconds.
 - **[Fast, Efficient Consensus Algorithm][]:** The XRP Ledger's consensus algorithm settles transactions in 4 to 5 seconds, processing at a throughput of up to 1500 transactions per second. These properties put XRP at least an order of magnitude ahead of other top digital assets.
-- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction costs.
-- **[Responsible Software Governance][]:** A team of full-time, world-class developers at Ripple maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests, and builds constructive relationships with governments and financial institutions worldwide.
+- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction fees.
+- **[Responsible Software Governance][]:** A team of full-time developers at Ripple & other organizations maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests.
 - **[Secure, Adaptable Cryptography][]:** The XRP Ledger relies on industry standard digital signature systems like ECDSA (the same scheme used by Bitcoin) but also supports modern, efficient algorithms like Ed25519. The extensible nature of the XRP Ledger's software makes it possible to add and disable algorithms as the state of the art in cryptography advances.
-- **[Modern Features for Smart Contracts][]:** Features like Escrow, Checks, and Payment Channels support cutting-edge financial applications including the [Interledger Protocol](https://interledger.org/). This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
+- **[Modern Features][]:** Features like Escrow, Checks, and Payment Channels support financial applications atop of the XRP Ledger. This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
 - **[On-Ledger Decentralized Exchange][]:** In addition to all the features that make XRP useful on its own, the XRP Ledger also has a fully-functional accounting system for tracking and trading obligations denominated in any way users want, and an exchange built into the protocol. The XRP Ledger can settle long, cross-currency payment paths and exchanges of multiple currencies in atomic transactions, bridging gaps of trust with XRP.

-[Censorship-Resistant Transaction Processing]: https://xrpl.org/xrp-ledger-overview.html#censorship-resistant-transaction-processing
-[Fast, Efficient Consensus Algorithm]: https://xrpl.org/xrp-ledger-overview.html#fast-efficient-consensus-algorithm
-[Finite XRP Supply]: https://xrpl.org/xrp-ledger-overview.html#finite-xrp-supply
-[Responsible Software Governance]: https://xrpl.org/xrp-ledger-overview.html#responsible-software-governance
-[Secure, Adaptable Cryptography]: https://xrpl.org/xrp-ledger-overview.html#secure-adaptable-cryptography
-[Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts
-[On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange

+[Censorship-Resistant Transaction Processing]: https://xrpl.org/transaction-censorship-detection.html#transaction-censorship-detection
+[Fast, Efficient Consensus Algorithm]: https://xrpl.org/consensus-research.html#consensus-research
+[Finite XRP Supply]: https://xrpl.org/what-is-xrp.html
+[Responsible Software Governance]: https://xrpl.org/contribute-code.html#contribute-code-to-the-xrp-ledger
+[Secure, Adaptable Cryptography]: https://xrpl.org/cryptographic-keys.html#cryptographic-keys
+[Modern Features]: https://xrpl.org/use-specialized-payment-types.html
+[On-Ledger Decentralized Exchange]: https://xrpl.org/decentralized-exchange.html#decentralized-exchange
|
||||
|
||||
## Source Code
|
||||
|
||||
Here are some good places to start learning the source code:
|
||||
|
||||
- Read the markdown files in the source tree: `src/ripple/**/*.md`.
|
||||
- Read [the levelization document](./Builds/levelization) to get an idea of the internal dependency graph.
|
||||
- Read [the levelization document](.github/scripts/levelization) to get an idea of the internal dependency graph.
|
||||
- In the big picture, the `main` function constructs an `ApplicationImp` object, which implements the `Application` virtual interface. Almost every component in the application takes an `Application&` parameter in its constructor, typically named `app` and stored as a member variable `app_`. This allows most components to depend on any other component.
|
||||
|
||||
### Repository Contents
|
||||
|
||||
| Folder | Contents |
|
||||
|:-----------|:-------------------------------------------------|
|
||||
| :--------- | :----------------------------------------------- |
|
||||
| `./bin` | Scripts and data files for Ripple integrators. |
|
||||
| `./Builds` | Platform-specific guides for building `rippled`. |
|
||||
| `./docs` | Source documentation files and doxygen config. |
|
||||
@@ -55,15 +58,14 @@ Here are some good places to start learning the source code:
|
||||
Some of the directories under `src` are external repositories included using
|
||||
git-subtree. See those directories' README files for more details.
|
||||
|
||||
|
||||
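For instance, refreshing one of those vendored subtrees from its upstream typically looks like the following sketch; the prefix, URL, and branch here are placeholders, not values taken from this repository:

```bash
# Hypothetical example: pull upstream changes into a git-subtree directory.
# Replace <dir>, <upstream-url>, and <branch> with the values documented in
# the subtree's own README.
git subtree pull --prefix src/<dir> <upstream-url> <branch> --squash
```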
## Additional Documentation

- [XRP Ledger Dev Portal](https://xrpl.org/)
- [Setup and Installation](https://xrpl.org/install-rippled.html)
- [Source Documentation (Doxygen)](https://xrplf.github.io/rippled/)

## See Also

- [Clio API Server for the XRP Ledger](https://github.com/XRPLF/clio)
- [Mailing List for Release Announcements](https://groups.google.com/g/ripple-server)
- [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi)
RELEASENOTES.md (4330 lines): file diff suppressed because it is too large.

SECURITY.md (123 lines):
For more details on operating an XRP Ledger server securely, please visit https://xrpl.org/manage-the-rippled-server.html.

# Security Policy

## Supported Versions

## Report Handling Process

Please report the bug directly to us and limit further disclosure. If you want to prove that you knew the bug as of a given time, consider using a cryptographic pre-commitment: hash the content of your report and publish the hash on a medium of your choice (e.g. on Twitter or as a memo in a transaction) as "proof" that you had written the text at a given point in time.
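As a concrete sketch of such a pre-commitment (the report filename is hypothetical; any SHA-256 tool works):

```bash
# Publish only the digest; keep report.txt itself private until disclosure.
shasum -a 256 report.txt
# Equivalently, with OpenSSL:
openssl dgst -sha256 report.txt
```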
Once we receive a report, we:

To report a qualifying bug, please send a detailed report to:

| Email Address | bugs@ripple.com                                      |
| :-----------: | :--------------------------------------------------- |
| Short Key ID  | `0xA9F514E0`                                          |
| Long Key ID   | `0xD900855AA9F514E0`                                  |
| Fingerprint   | `B72C 0654 2F2A E250 2763 A268 D900 855A A9F5 14E0`  |

The full PGP key for this address, which is also available on several key servers (e.g. on [keys.gnupg.net](https://keys.gnupg.net)), is:
```
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBGkSZAQBEACprU199OhgdsOsygNjiQV4msuN3vDOUooehL+NwfsGfW79Tbqq
Q2u7uQ3NZjW+M2T4nsDwuhkr7pe7xSReR5W8ssaczvtUyxkvbMClilcgZ2OSCAuC
N9tzJsqOqkwBvXoNXkn//T2jnPz0ZU2wSF+NrEibq5FeuyGdoX3yXXBxq9pW9HzK
HkQll63QSl6BzVSGRQq+B6lGgaZGLwf3mzmIND9Z5VGLNK2jKynyz9z091whNG/M
kV+E7/r/bujHk7WIVId07G5/COTXmSr7kFnNEkd2Umw42dkgfiNKvlmJ9M7c1wLK
KbL9Eb4ADuW6rRc5k4s1e6GT8R4/VPliWbCl9SE32hXH8uTkqVIFZP2eyM5WRRHs
aKzitkQG9UK9gcb0kdgUkxOvvgPHAe5IuZlcHFzU4y0dBbU1VEFWVpiLU0q+IuNw
5BRemeHc59YNsngkmAZ+/9zouoShRusZmC8Wzotv75C2qVBcjijPvmjWAUz0Zunm
Lsr+O71vqHE73pERjD07wuD/ISjiYRYYE/bVrXtXLZijC7qAH4RE3nID+2ojcZyO
/2jMQvt7un56RsGH4UBHi3aBHi9bUoDGCXKiQY981cEuNaOxpou7Mh3x/ONzzSvk
sTV6nl1LOZHykN1JyKwaNbTSAiuyoN+7lOBqbV04DNYAHL88PrT21P83aQARAQAB
tB1SaXBwbGUgTGFicyA8YnVnc0ByaXBwbGUuY29tPokCTgQTAQgAOBYhBLcsBlQv
KuJQJ2OiaNkAhVqp9RTgBQJpEmQEAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheA
AAoJENkAhVqp9RTgBzgP/i7y+aDWl1maig1XMdyb+o0UGusumFSW4Hmj278wlKVv
usgLPihYgHE0PKrv6WRyKOMC1tQEcYYN93M+OeQ1vFhS2YyURq6RCMmh4zq/awXG
uZbG36OURB5NH8lGBOHiN/7O+nY0CgenBT2JWm+GW3nEOAVOVm4+r5GlpPlv+Dp1
NPBThcKXFMnH73++NpSQoDzTfRYHPxhDAX3jkLi/moXfSanOLlR6l94XNNN0jBHW
Quao0rzf4WSXq9g6AS224xhAA5JyIcFl8TX7hzj5HaFn3VWo3COoDu4U7H+BM0fl
85yqiMQypp7EhN2gxpMMWaHY5TFM85U/bFXFYfEgihZ4/gt4uoIzsNI9jlX7mYvG
KFdDij+oTlRsuOxdIy60B3dKcwOH9nZZCz0SPsN/zlRWgKzK4gDKdGhFkU9OlvPu
94ZqscanoiWKDoZkF96+sjgfjkuHsDK7Lwc1Xi+T4drHG/3aVpkYabXox+lrKB/S
yxZjeqOIQzWPhnLgCaLyvsKo5hxKzL0w3eURu8F3IS7RgOOlljv4M+Me9sEVcdNV
aN3/tQwbaomSX1X5D5YXqhBwC3rU3wXwamsscRTGEpkV+JCX6KUqGP7nWmxCpAly
FL05XuOd5SVHJjXLeuje0JqLUpN514uL+bThWwDbDTdAdwW3oK/2WbXz7IfJRLBj
uQINBGkSZAQBEADdI3SL2F72qkrgFqXWE6HSRBu9bsAvTE5QrRPWk7ux6at537r4
S4sIw2dOwLvbyIrDgKNq3LQ5wCK88NO/NeCOFm4AiCJSl3pJHXYnTDoUxTrrxx+o
vSRI4I3fHEql/MqzgiAb0YUezjgFdh3vYheMPp/309PFbOLhiFqEcx80Mx5h06UH
gDzu1qNj3Ec+31NLic5zwkrAkvFvD54d6bqYR3SEgMau6aYEewpGHbWBi2pLqSi2
lQcAeOFixqGpTwDmAnYR8YtjBYepy0MojEAdTHcQQlOYSDk4q4elG+io2N8vECfU
rD6ORecN48GXdZINYWTAdslrUeanmBdgQrYkSpce8TSghgT9P01SNaXxmyaehVUO
lqI4pcg5G2oojAE8ncNS3TwDtt7daTaTC3bAdr4PXDVAzNAiewjMNZPB7xidkDGQ
Y4W1LxTMXyJVWxehYOH7tsbBRKninlfRnLgYzmtIbNRAAvNcsxU6ihv3AV0WFknN
YbSzotEv1Xq/5wk309x8zCDe+sP0cQicvbXafXmUzPAZzeqFg+VLFn7F9MP1WGlW
B1u7VIvBF1Mp9Nd3EAGBAoLRdRu+0dVWIjPTQuPIuD9cCatJA0wVaKUrjYbBMl88
a12LixNVGeSFS9N7ADHx0/o7GNT6l88YbaLP6zggUHpUD/bR+cDN7vllIQARAQAB
iQI2BBgBCAAgFiEEtywGVC8q4lAnY6Jo2QCFWqn1FOAFAmkSZAQCGwwACgkQ2QCF
Wqn1FOAfAA/8CYq4p0p4bobY20CKEMsZrkBTFJyPDqzFwMeTjgpzqbD7Y3Qq5QCK
OBbvY02GWdiIsNOzKdBxiuam2xYP9WHZj4y7/uWEvT0qlPVmDFu+HXjoJ43oxwFd
CUp2gMuQ4cSL3X94VRJ3BkVL+tgBm8CNY0vnTLLOO3kum/R69VsGJS1JSGUWjNM+
4qwS3mz+73xJu1HmERyN2RZF/DGIZI2PyONQQ6aH85G1Dd2ohu2/DBAkQAMBrPbj
FrbDaBLyFhODxU3kTWqnfLlaElSm2EGdIU2yx7n4BggEa//NZRMm5kyeo4vzhtlQ
YIVUMLAOLZvnEqDnsLKp+22FzNR/O+htBQC4lPywl53oYSALdhz1IQlcAC1ru5KR
XPzhIXV6IIzkcx9xNkEclZxmsuy5ERXyKEmLbIHAlzFmnrldlt2ZgXDtzaorLmxj
klKibxd5tF50qOpOivz+oPtFo7n+HmFa1nlVAMxlDCUdM0pEVeYDKI5zfVwalyhZ
NnjpakdZSXMwgc7NP/hH9buF35hKDp7EckT2y3JNYwHsDdy1icXN2q40XZw5tSIn
zkPWdu3OUY8PISohN6Pw4h0RH4ZmoX97E8sEfmdKaT58U4Hf2aAv5r9IWCSrAVqY
u5jvac29CzQR9Kal0A+8phHAXHNFD83SwzIC0syaT9ficAguwGH8X6Q=
=nGuD
-----END PGP PUBLIC KEY BLOCK-----
```
bin/browser.js (470 lines, deleted):

@@ -1,470 +0,0 @@
#!/usr/bin/node
//
// ledger?l=L
// transaction?h=H
// ledger_entry?l=L&h=H
// account?l=L&a=A
// directory?l=L&dir_root=H&i=I
// directory?l=L&o=A&i=I // owner directory
// offer?l=L&offer=H
// offer?l=L&account=A&i=I
// ripple_state=l=L&a=A&b=A&c=C
// account_lines?l=L&a=A
//
// A=address
// C=currency 3 letter code
// H=hash
// I=index
// L=current | closed | validated | index | hash
//

var async = require("async");
var extend = require("extend");
var http = require("http");
var url = require("url");

var Remote = require("ripple-lib").Remote;

var program = process.argv[1];

// Render a minimal HTML page around the given body.
var httpd_response = function (res, opts) {
  var self = this;

  res.statusCode = opts.statusCode;
  res.end(
    "<HTML>"
      + "<HEAD><TITLE>Title</TITLE></HEAD>"
      + "<BODY BACKGROUND=\"#FFFFFF\">"
      + "State:" + self.state
      + "<UL>"
      + "<LI><A HREF=\"/\">home</A>"
      + "<LI>" + html_link('r4EM4gBQfr1QgQLXSPF4r7h84qE9mb6iCC')
//    + "<LI><A HREF=\""+test+"\">rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh</A>"
      + "<LI><A HREF=\"/ledger\">ledger</A>"
      + "</UL>"
      + (opts.body || '')
      + '<HR><PRE>'
      + (opts.url || '')
      + '</PRE>'
      + "</BODY>"
      + "</HTML>"
    );
};

var html_link = function (generic) {
  return '<A HREF="' + build_uri({ type: 'account', account: generic }) + '">' + generic + '</A>';
};

// Build a link to a type.
var build_uri = function (params, opts) {
  var c;

  if (params.type === 'account') {
    c = {
      pathname: 'account',
      query: {
        l: params.ledger,
        a: params.account,
      },
    };
  } else if (params.type === 'ledger') {
    c = {
      pathname: 'ledger',
      query: {
        l: params.ledger,
      },
    };
  } else if (params.type === 'transaction') {
    c = {
      pathname: 'transaction',
      query: {
        h: params.hash,
      },
    };
  } else {
    c = {};
  }

  opts = opts || {};

  c.protocol = "http";
  c.hostname = opts.hostname || self.base.hostname;
  c.port = opts.port || self.base.port;

  return url.format(c);
};

var build_link = function (item, link) {
  console.log(link);
  return "<A HREF=" + link + ">" + item + "</A>";
};

var rewrite_field = function (type, obj, field, opts) {
  if (field in obj) {
    obj[field] = rewrite_type(type, obj[field], opts);
  }
};

// Rewrite a single value of the given type into linked/highlighted HTML.
var rewrite_type = function (type, obj, opts) {
  if ('amount' === type) {
    if ('string' === typeof obj) {
      // XRP.
      return '<B>' + obj + '</B>';
    } else {
      // IOU amounts are { value, currency, issuer }: link the issuer.
      rewrite_field('address', obj, 'issuer', opts);

      return obj;
    }
  }
  if ('address' === type) {
    return build_link(
      obj,
      build_uri({
        type: 'account',
        account: obj
      }, opts)
    );
  }
  else if ('ledger' === type) {
    return build_link(
      obj,
      build_uri({
        type: 'ledger',
        ledger: obj,
      }, opts)
    );
  }
  else if ('node' === type) {
    // A node
    if ('PreviousTxnID' in obj)
      obj.PreviousTxnID = rewrite_type('transaction', obj.PreviousTxnID, opts);

    if ('Offer' === obj.LedgerEntryType) {
      if ('NewFields' in obj) {
        if ('TakerGets' in obj.NewFields)
          obj.NewFields.TakerGets = rewrite_type('amount', obj.NewFields.TakerGets, opts);

        if ('TakerPays' in obj.NewFields)
          obj.NewFields.TakerPays = rewrite_type('amount', obj.NewFields.TakerPays, opts);
      }
    }

    obj.LedgerEntryType = '<B>' + obj.LedgerEntryType + '</B>';

    return obj;
  }
  else if ('transaction' === type) {
    // Reference to a transaction.
    return build_link(
      obj,
      build_uri({
        type: 'transaction',
        hash: obj
      }, opts)
    );
  }

  return 'ERROR: ' + type;
};

var rewrite_object = function (obj, opts) {
  var out = extend({}, obj);

  rewrite_field('address', out, 'Account', opts);

  rewrite_field('ledger', out, 'parent_hash', opts);
  rewrite_field('ledger', out, 'ledger_index', opts);
  rewrite_field('ledger', out, 'ledger_current_index', opts);
  rewrite_field('ledger', out, 'ledger_hash', opts);

  if ('ledger' in obj) {
    // It's a ledger header.
    out.ledger = rewrite_object(out.ledger, opts);

    if ('ledger_hash' in out.ledger)
      out.ledger.ledger_hash = '<B>' + out.ledger.ledger_hash + '</B>';

    delete out.ledger.hash;
    delete out.ledger.totalCoins;
  }

  if ('TransactionType' in obj) {
    // It's a transaction.
    out.TransactionType = '<B>' + obj.TransactionType + '</B>';

    rewrite_field('amount', out, 'TakerGets', opts);
    rewrite_field('amount', out, 'TakerPays', opts);
    rewrite_field('ledger', out, 'inLedger', opts);

    out.meta.AffectedNodes = out.meta.AffectedNodes.map(function (node) {
      var kind = 'CreatedNode' in node
        ? 'CreatedNode'
        : 'ModifiedNode' in node
          ? 'ModifiedNode'
          : 'DeletedNode' in node
            ? 'DeletedNode'
            : undefined;

      if (kind) {
        node[kind] = rewrite_type('node', node[kind], opts);
      }
      return node;
    });
  }
  else if ('node' in obj && 'LedgerEntryType' in obj.node) {
    // It's a ledger entry.

    if (obj.node.LedgerEntryType === 'AccountRoot') {
      rewrite_field('address', out.node, 'Account', opts);
      rewrite_field('transaction', out.node, 'PreviousTxnID', opts);
      rewrite_field('ledger', out.node, 'PreviousTxnLgrSeq', opts);
    }

    out.node.LedgerEntryType = '<B>' + out.node.LedgerEntryType + '</B>';
  }

  return out;
};

// Walk an AccountRoot's PreviousTxnID chain to build its balance history.
var augment_object = function (obj, opts, done) {
  if (obj.node.LedgerEntryType == 'AccountRoot') {
    var tx_hash = obj.node.PreviousTxnID;
    var tx_ledger = obj.node.PreviousTxnLgrSeq;

    obj.history = [];

    async.whilst(
      function () { return tx_hash; },
      function (callback) {
        // console.log("augment_object: request: %s %s", tx_hash, tx_ledger);
        opts.remote.request_tx(tx_hash)
          .on('success', function (m) {
            tx_hash = undefined;
            tx_ledger = undefined;

            //console.log("augment_object: ", JSON.stringify(m));
            m.meta.AffectedNodes.filter(function(n) {
              // console.log("augment_object: ", JSON.stringify(n));
              // if (n.ModifiedNode)
              //   console.log("augment_object: %s %s %s %s %s %s/%s", 'ModifiedNode' in n, n.ModifiedNode && (n.ModifiedNode.LedgerEntryType === 'AccountRoot'), n.ModifiedNode && n.ModifiedNode.FinalFields && (n.ModifiedNode.FinalFields.Account === obj.node.Account), Object.keys(n)[0], n.ModifiedNode && (n.ModifiedNode.LedgerEntryType), obj.node.Account, n.ModifiedNode && n.ModifiedNode.FinalFields && n.ModifiedNode.FinalFields.Account);
              // if ('ModifiedNode' in n && n.ModifiedNode.LedgerEntryType === 'AccountRoot')
              // {
              //   console.log("***: ", JSON.stringify(m));
              //   console.log("***: ", JSON.stringify(n));
              // }
              return 'ModifiedNode' in n
                && n.ModifiedNode.LedgerEntryType === 'AccountRoot'
                && n.ModifiedNode.FinalFields
                && n.ModifiedNode.FinalFields.Account === obj.node.Account;
            })
            .forEach(function (n) {
              tx_hash = n.ModifiedNode.PreviousTxnID;
              tx_ledger = n.ModifiedNode.PreviousTxnLgrSeq;

              obj.history.push({
                tx_hash: tx_hash,
                tx_ledger: tx_ledger
              });
              console.log("augment_object: next: %s %s", tx_hash, tx_ledger);
            });

            callback();
          })
          .on('error', function (m) {
            callback(m);
          })
          .request();
      },
      function (err) {
        if (err) {
          done();
        }
        else {
          async.forEach(obj.history, function (o, callback) {
            opts.remote.request_account_info(obj.node.Account)
              .ledger_index(o.tx_ledger)
              .on('success', function (m) {
                //console.log("augment_object: ", JSON.stringify(m));
                o.Balance = m.account_data.Balance;
                // o.account_data = m.account_data;
                callback();
              })
              .on('error', function (m) {
                o.error = m;
                callback();
              })
              .request();
          },
          function (err) {
            done(err);
          });
        }
      });
  }
  else {
    done();
  }
};

if (process.argv.length < 4 || process.argv.length > 7) {
  console.log("Usage: %s ws_ip ws_port [<ip> [<port> [<start>]]]", program);
}
else {
  var ws_ip = process.argv[2];
  var ws_port = process.argv[3];
  var ip = process.argv.length > 4 ? process.argv[4] : "127.0.0.1";
  var port = process.argv.length > 5 ? process.argv[5] : "8080";

  // console.log("START");
  var self = this;

  var remote = (new Remote({
    websocket_ip: ws_ip,
    websocket_port: ws_port,
    trace: false
  }))
    .on('state', function (m) {
      console.log("STATE: %s", m);

      self.state = m;
    })
    // .once('ledger_closed', callback)
    .connect()
    ;

  self.base = {
    hostname: ip,
    port: port,
    remote: remote,
  };

  // console.log("SERVE");
  var server = http.createServer(function (req, res) {
    var input = "";

    req.setEncoding('utf8');

    req.on('data', function (buffer) {
      // console.log("DATA: %s", buffer);
      input = input + buffer;
    });

    req.on('end', function () {
      // console.log("URL: %s", req.url);
      // console.log("HEADERS: %s", JSON.stringify(req.headers, undefined, 2));

      var _parsed = url.parse(req.url, true);
      var _url = JSON.stringify(_parsed, undefined, 2);

      // console.log("HEADERS: %s", JSON.stringify(_parsed, undefined, 2));
      if (_parsed.pathname === "/account") {
        var request = remote
          .request_ledger_entry('account_root')
          .ledger_index(-1)
          .account_root(_parsed.query.a)
          .on('success', function (m) {
            // console.log("account_root: %s", JSON.stringify(m, undefined, 2));

            augment_object(m, self.base, function() {
              httpd_response(res,
                {
                  statusCode: 200,
                  url: _url,
                  body: "<PRE>"
                    + JSON.stringify(rewrite_object(m, self.base), undefined, 2)
                    + "</PRE>"
                });
            });
          })
          .request();

      } else if (_parsed.pathname === "/ledger") {
        var request = remote
          .request_ledger(undefined, { expand: true, transactions: true })
          .on('success', function (m) {
            // console.log("Ledger: %s", JSON.stringify(m, undefined, 2));

            httpd_response(res,
              {
                statusCode: 200,
                url: _url,
                body: "<PRE>"
                  + JSON.stringify(rewrite_object(m, self.base), undefined, 2)
                  + "</PRE>"
              });
          });

        if (_parsed.query.l && _parsed.query.l.length === 64) {
          request.ledger_hash(_parsed.query.l);
        }
        else if (_parsed.query.l) {
          request.ledger_index(Number(_parsed.query.l));
        }
        else {
          request.ledger_index(-1);
        }

        request.request();

      } else if (_parsed.pathname === "/transaction") {
        var request = remote
          .request_tx(_parsed.query.h)
          // .request_transaction_entry(_parsed.query.h)
          // .ledger_select(_parsed.query.l)
          .on('success', function (m) {
            // console.log("transaction: %s", JSON.stringify(m, undefined, 2));

            httpd_response(res,
              {
                statusCode: 200,
                url: _url,
                body: "<PRE>"
                  + JSON.stringify(rewrite_object(m, self.base), undefined, 2)
                  + "</PRE>"
              });
          })
          .on('error', function (m) {
            httpd_response(res,
              {
                statusCode: 200,
                url: _url,
                body: "<PRE>"
                  + 'ERROR: ' + JSON.stringify(m, undefined, 2)
                  + "</PRE>"
              });
          })
          .request();

      } else {
        var test = build_uri({
          type: 'account',
          ledger: 'closed',
          account: 'rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh',
        }, self.base);

        httpd_response(res,
          {
            statusCode: req.url === "/" ? 200 : 404,
            url: _url,
          });
      }
    });
  });

  server.listen(port, ip, undefined,
    function () {
      console.log("Listening at: http://%s:%s", ip, port);
    });
}

// vim:sw=2:sts=2:ts=8:et
@@ -1,24 +0,0 @@

In this directory are two scripts, `build.sh` and `test.sh`, used for building and testing rippled.

(For now, they assume Bash and Linux. Once I get Windows containers for testing, I'll try them there, but if Bash is not available, then they will soon be joined by PowerShell scripts `build.ps` and `test.ps`.)

We don't want these scripts to require arcane invocations that can only be pieced together from within a CI configuration. We want something that humans can easily invoke, read, and understand, for when we eventually have to test and debug them interactively. That means:

(1) They should work with no arguments.
(2) They should document their arguments.
(3) They should expand short arguments into long arguments.

While we want to provide options for common use cases, we don't need to offer the kitchen sink. We can rightfully expect users with esoteric, complicated needs to write their own scripts.

To make argument-handling easy for us, the implementers, we can just take all arguments from environment variables. They have the nice advantage that every command line uses named arguments. For the benefit of us and our users, we document those variables at the top of each script.
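Under that convention, a hypothetical invocation (run from the directory containing the scripts) might look like this; every variable shown is one documented in the scripts that follow:

```bash
# Named arguments via environment variables; anything unset falls back to
# the default documented at the top of each script.
COMPILER=clang BUILD_TYPE=Release ./build.sh
MANUAL_TESTS=true CONCURRENT_TESTS=4 ./test.sh
```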
@@ -1,31 +0,0 @@

#!/usr/bin/env bash

set -o xtrace
set -o errexit

# The build system. Either 'Unix Makefiles' or 'Ninja'.
GENERATOR=${GENERATOR:-Unix Makefiles}
# The compiler. Either 'gcc' or 'clang'.
COMPILER=${COMPILER:-gcc}
# The build type. Either 'Debug' or 'Release'.
BUILD_TYPE=${BUILD_TYPE:-Debug}
# Additional arguments to CMake.
# We use the `-` substitution here instead of `:-` so that callers can erase
# the default by setting `$CMAKE_ARGS` to the empty string.
CMAKE_ARGS=${CMAKE_ARGS-'-Dwerr=ON'}

# https://gitlab.kitware.com/cmake/cmake/issues/18865
CMAKE_ARGS="-DBoost_NO_BOOST_CMAKE=ON ${CMAKE_ARGS}"

if [[ ${COMPILER} == 'gcc' ]]; then
  export CC='gcc'
  export CXX='g++'
elif [[ ${COMPILER} == 'clang' ]]; then
  export CC='clang'
  export CXX='clang++'
fi

mkdir build
cd build
cmake -G "${GENERATOR}" -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${CMAKE_ARGS} ..
cmake --build . -- -j $(nproc)
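One consequence of the `-` (rather than `:-`) substitution above: setting `CMAKE_ARGS` to the empty string erases the `-Dwerr=ON` default, while leaving it unset keeps it. A hypothetical call:

```bash
# Empty-but-set CMAKE_ARGS suppresses '-Dwerr=ON'; unsetting it would not.
CMAKE_ARGS= GENERATOR=Ninja ./build.sh
```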
@@ -1,41 +0,0 @@

#!/usr/bin/env bash

set -o xtrace
set -o errexit

# Set to 'true' to run the known "manual" tests in rippled.
MANUAL_TESTS=${MANUAL_TESTS:-false}
# The maximum number of concurrent tests.
CONCURRENT_TESTS=${CONCURRENT_TESTS:-$(nproc)}
# The path to rippled.
RIPPLED=${RIPPLED:-build/rippled}
# Additional arguments to rippled.
RIPPLED_ARGS=${RIPPLED_ARGS:-}

function join_by { local IFS="$1"; shift; echo "$*"; }

declare -a manual_tests=(
  'beast.chrono.abstract_clock'
  'beast.unit_test.print'
  'ripple.NodeStore.Timing'
  'ripple.app.Flow_manual'
  'ripple.app.NoRippleCheckLimits'
  'ripple.app.PayStrandAllPairs'
  'ripple.consensus.ByzantineFailureSim'
  'ripple.consensus.DistributedValidators'
  'ripple.consensus.ScaleFreeSim'
  'ripple.tx.CrossingLimits'
  'ripple.tx.FindOversizeCross'
  'ripple.tx.Offer_manual'
  'ripple.tx.OversizeMeta'
  'ripple.tx.PlumpBook'
)

if [[ ${MANUAL_TESTS} == 'true' ]]; then
  RIPPLED_ARGS+=" --unittest=$(join_by , "${manual_tests[@]}")"
else
  RIPPLED_ARGS+=" --unittest --quiet --unittest-log"
fi
RIPPLED_ARGS+=" --unittest-jobs ${CONCURRENT_TESTS}"

${RIPPLED} ${RIPPLED_ARGS}
@@ -1,274 +0,0 @@

#!/usr/bin/env bash
set -ex

function version_ge() { test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" == "$1"; }

__dirname=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
echo "using CC: ${CC}"
"${CC}" --version
export CC

COMPNAME=$(basename $CC)
echo "using CXX: ${CXX:-notset}"
if [[ $CXX ]]; then
  "${CXX}" --version
  export CXX
fi
: ${BUILD_TYPE:=Debug}
echo "BUILD TYPE: ${BUILD_TYPE}"

: ${TARGET:=install}
echo "BUILD TARGET: ${TARGET}"

JOBS=${NUM_PROCESSORS:-2}
if [[ ${TRAVIS:-false} != "true" ]]; then
  JOBS=$((JOBS+1))
fi

if [[ ! -z "${CMAKE_EXE:-}" ]] ; then
  export PATH="$(dirname ${CMAKE_EXE}):$PATH"
fi

if [ -x /usr/bin/time ] ; then
  : ${TIME:="Duration: %E"}
  export TIME
  time=/usr/bin/time
else
  time=
fi

echo "Building rippled"
: ${CMAKE_EXTRA_ARGS:=""}
if [[ ${NINJA_BUILD:-} == true ]]; then
  CMAKE_EXTRA_ARGS+=" -G Ninja"
fi

coverage=false
if [[ "${TARGET}" == "coverage" ]] ; then
  echo "coverage option detected."
  coverage=true
fi

cmake --version
CMAKE_VER=$(cmake --version | cut -d " " -f 3 | head -1)

#
# allow explicit setting of the name of the build
# dir, otherwise default to the compiler.build_type
#
: "${BUILD_DIR:=${COMPNAME}.${BUILD_TYPE}}"
BUILDARGS="--target ${TARGET}"
BUILDTOOLARGS=""
if version_ge $CMAKE_VER "3.12.0" ; then
  BUILDARGS+=" --parallel"
fi

if [[ ${NINJA_BUILD:-} == false ]]; then
  if version_ge $CMAKE_VER "3.12.0" ; then
    BUILDARGS+=" ${JOBS}"
  else
    BUILDTOOLARGS+=" -j ${JOBS}"
  fi
fi

if [[ ${VERBOSE_BUILD:-} == true ]]; then
  CMAKE_EXTRA_ARGS+=" -DCMAKE_VERBOSE_MAKEFILE=ON"
  if version_ge $CMAKE_VER "3.14.0" ; then
    BUILDARGS+=" --verbose"
  else
    if [[ ${NINJA_BUILD:-} == false ]]; then
      BUILDTOOLARGS+=" verbose=1"
    else
      BUILDTOOLARGS+=" -v"
    fi
  fi
fi

if [[ ${USE_CCACHE:-} == true ]]; then
  echo "using ccache with basedir [${CCACHE_BASEDIR:-}]"
  CMAKE_EXTRA_ARGS+=" -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache"
fi
if [ -d "build/${BUILD_DIR}" ]; then
  rm -rf "build/${BUILD_DIR}"
fi

mkdir -p "build/${BUILD_DIR}"
pushd "build/${BUILD_DIR}"

# cleanup possible artifacts
rm -fv CMakeFiles/CMakeOutput.log CMakeFiles/CMakeError.log
# Clean up NIH directories which should be git repos, but aren't
for nih_path in ${NIH_CACHE_ROOT}/*/*/*/src ${NIH_CACHE_ROOT}/*/*/src
do
  for dir in lz4 snappy rocksdb
  do
    if [ -e ${nih_path}/${dir} -a \! -e ${nih_path}/${dir}/.git ]
    then
      ls -la ${nih_path}/${dir}*
      rm -rfv ${nih_path}/${dir}*
    fi
  done
done

# generate
${time} cmake ../.. -DCMAKE_BUILD_TYPE=${BUILD_TYPE} ${CMAKE_EXTRA_ARGS}
# Display the cmake output, to help with debugging if something fails
for file in CMakeOutput.log CMakeError.log
do
  if [ -f CMakeFiles/${file} ]
  then
    ls -l CMakeFiles/${file}
    cat CMakeFiles/${file}
  fi
done
# build
export DESTDIR=$(pwd)/_INSTALLED_

${time} eval cmake --build . ${BUILDARGS} -- ${BUILDTOOLARGS}

if [[ ${TARGET} == "docs" ]]; then
  ## mimic the standard test output for docs build
  ## to make controlling processes like jenkins happy
  if [ -f docs/html/index.html ]; then
    echo "1 case, 1 test total, 0 failures"
  else
    echo "1 case, 1 test total, 1 failures"
  fi
  exit
fi
popd

if [[ "${TARGET}" == "validator-keys" ]] ; then
  export APP_PATH="$PWD/build/${BUILD_DIR}/validator-keys/validator-keys"
else
  export APP_PATH="$PWD/build/${BUILD_DIR}/rippled"
fi
echo "using APP_PATH: ${APP_PATH}"

# See what we've actually built
ldd ${APP_PATH}

: ${APP_ARGS:=}

if [[ "${TARGET}" == "validator-keys" ]] ; then
  APP_ARGS="--unittest"
else
  function join_by { local IFS="$1"; shift; echo "$*"; }

  # This is a list of manual tests
  # in rippled that we want to run
  # ORDER matters here...sorted in approximately
  # descending execution time (longest running tests at top)
  declare -a manual_tests=(
    'ripple.ripple_data.reduce_relay_simulate'
    'ripple.tx.Offer_manual'
    'ripple.tx.CrossingLimits'
    'ripple.tx.PlumpBook'
    'ripple.app.Flow_manual'
    'ripple.tx.OversizeMeta'
    'ripple.consensus.DistributedValidators'
    'ripple.app.NoRippleCheckLimits'
    'ripple.ripple_data.compression'
    'ripple.NodeStore.Timing'
    'ripple.consensus.ByzantineFailureSim'
    'beast.chrono.abstract_clock'
    'beast.unit_test.print'
  )
  if [[ ${TRAVIS:-false} != "true" ]]; then
    # these two tests cause travis CI to run out of memory.
    # TODO: investigate possible workarounds.
    manual_tests=(
      'ripple.consensus.ScaleFreeSim'
      'ripple.tx.FindOversizeCross'
      "${manual_tests[@]}"
    )
  fi

  if [[ ${MANUAL_TESTS:-} == true ]]; then
    APP_ARGS+=" --unittest=$(join_by , "${manual_tests[@]}")"
  else
    APP_ARGS+=" --unittest --quiet --unittest-log"
  fi
  if [[ ${coverage} == false && ${PARALLEL_TESTS:-} == true ]]; then
    APP_ARGS+=" --unittest-jobs ${JOBS}"
  fi

  if [[ ${IPV6_TESTS:-} == true ]]; then
    APP_ARGS+=" --unittest-ipv6"
  fi
fi

if [[ ${coverage} == true && $CC =~ ^gcc ]]; then
  # Push the results (lcov.info) to codecov
  codecov -X gcov # don't even try and look for .gcov files ;)
  find . -name "*.gcda" | xargs rm -f
fi

if [[ ${SKIP_TESTS:-} == true ]]; then
  echo "skipping tests."
  exit
fi

ulimit -a
corepat=$(cat /proc/sys/kernel/core_pattern)
# A core_pattern starting with '|' pipes cores to a program; note the
# POSIX character class needs double brackets to match whitespace.
if [[ ${corepat} =~ ^[[:space:]]*\| ]] ; then
  echo "WARNING: core pattern is piping - can't search for core files"
  look_core=false
else
  look_core=true
  coredir=$(dirname ${corepat})
fi
if [[ ${look_core} == true ]]; then
  before=$(ls -A1 ${coredir})
fi

set +e
echo "Running tests for ${APP_PATH}"
if [[ ${MANUAL_TESTS:-} == true && ${PARALLEL_TESTS:-} != true ]]; then
  for t in "${manual_tests[@]}" ; do
    ${APP_PATH} --unittest=${t}
    TEST_STAT=$?
    if [[ $TEST_STAT -ne 0 ]] ; then
      break
    fi
  done
else
  ${APP_PATH} ${APP_ARGS}
  TEST_STAT=$?
fi
set -e

if [[ ${look_core} == true ]]; then
  after=$(ls -A1 ${coredir})
  oIFS="${IFS}"
  IFS=$'\n\r'
  found_core=false
  for l in $(diff -w --suppress-common-lines <(echo "$before") <(echo "$after")) ; do
    if [[ "$l" =~ ^[[:space:]]*\>[[:space:]]*(.+)$ ]] ; then
      corefile="${BASH_REMATCH[1]}"
      echo "FOUND core dump file at '${coredir}/${corefile}'"
      gdb_output=$(/bin/mktemp /tmp/gdb_output_XXXXXXXXXX.txt)
      found_core=true
      gdb \
        -ex "set height 0" \
        -ex "set logging file ${gdb_output}" \
        -ex "set logging on" \
        -ex "print 'ripple::BuildInfo::versionString'" \
        -ex "thread apply all backtrace full" \
        -ex "info inferiors" \
        -ex quit \
        "$APP_PATH" \
        "${coredir}/${corefile}" &> /dev/null

      echo -e "CORE INFO: \n\n $(cat ${gdb_output}) \n\n)"
    fi
  done
  IFS="${oIFS}"
fi

if [[ ${found_core} == true ]]; then
  exit -1
else
  exit $TEST_STAT
fi
@@ -1,36 +0,0 @@

#!/usr/bin/env bash
# run our build script in a docker container
# using travis-ci hosts
set -eux

function join_by { local IFS="$1"; shift; echo "$*"; }

set +x
echo "VERBOSE_BUILD=true" > /tmp/co.env
matchers=(
  'TRAVIS.*' 'CI' 'CC' 'CXX'
  'BUILD_TYPE' 'TARGET' 'MAX_TIME'
  'CODECOV.+' 'CMAKE.*' '.+_TESTS'
  '.+_OPTIONS' 'NINJA.*' 'NUM_.+'
  'NIH_.+' 'BOOST.*' '.*CCACHE.*')

matchstring=$(join_by '|' "${matchers[@]}")
echo "MATCHSTRING IS:: $matchstring"
env | grep -E "^(${matchstring})=" >> /tmp/co.env
set -x
# need to eliminate TRAVIS_CMD...don't want to pass it to the container
cat /tmp/co.env | grep -v TRAVIS_CMD > /tmp/co.env.2
mv /tmp/co.env.2 /tmp/co.env
cat /tmp/co.env
mkdir -p -m 0777 ${TRAVIS_BUILD_DIR}/cores
echo "${TRAVIS_BUILD_DIR}/cores/%e.%p" | sudo tee /proc/sys/kernel/core_pattern
docker run \
  -t --env-file /tmp/co.env \
  -v ${TRAVIS_HOME}:${TRAVIS_HOME} \
  -w ${TRAVIS_BUILD_DIR} \
  --cap-add SYS_PTRACE \
  --ulimit "core=-1" \
  $DOCKER_IMAGE \
  /bin/bash -c 'if [[ $CC =~ ([[:alpha:]]+)-([[:digit:].]+) ]] ; then sudo update-alternatives --set ${BASH_REMATCH[1]} /usr/bin/$CC; fi; bin/ci/ubuntu/build-and-test.sh'
@@ -1,44 +0,0 @@

#!/usr/bin/env bash
# some cached files create churn, so save them here for
# later restoration before packing the cache
set -eux
clean_cache="travis_clean_cache"
if [[ ! ( "${TRAVIS_JOB_NAME}" =~ "windows" || \
  "${TRAVIS_JOB_NAME}" =~ "prereq-keep" ) ]] && \
  ( [[ "${TRAVIS_COMMIT_MESSAGE}" =~ "${clean_cache}" ]] || \
    ( [[ -v TRAVIS_PULL_REQUEST_SHA && \
      "${TRAVIS_PULL_REQUEST_SHA}" != "" ]] && \
      git log -1 "${TRAVIS_PULL_REQUEST_SHA}" | grep -cq "${clean_cache}" -
    )
  )
then
  find ${TRAVIS_HOME}/_cache -maxdepth 2 -type d
  rm -rf ${TRAVIS_HOME}/_cache
  mkdir -p ${TRAVIS_HOME}/_cache
fi

pushd ${TRAVIS_HOME}
if [ -f cache_ignore.tar ] ; then
  rm -f cache_ignore.tar
fi

if [ -d _cache/nih_c ] ; then
  find _cache/nih_c -name "build.ninja" | tar rf cache_ignore.tar --files-from -
  find _cache/nih_c -name ".ninja_deps" | tar rf cache_ignore.tar --files-from -
  find _cache/nih_c -name ".ninja_log" | tar rf cache_ignore.tar --files-from -
  find _cache/nih_c -name "*.log" | tar rf cache_ignore.tar --files-from -
  find _cache/nih_c -name "*.tlog" | tar rf cache_ignore.tar --files-from -
  # show .a files in the cache, for sanity checking
  find _cache/nih_c -name "*.a" -ls
fi

if [ -d _cache/ccache ] ; then
  find _cache/ccache -name "stats" | tar rf cache_ignore.tar --files-from -
fi

if [ -f cache_ignore.tar ] ; then
  tar -tf cache_ignore.tar
fi
popd
@@ -1,64 +0,0 @@

var ripple = require('ripple-lib');

var v = {
  seed: "snoPBrXtMeMyMHUVTgbuqAfg1SUTb",
  addr: "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"
};

var remote = ripple.Remote.from_config({
  "trusted" : true,
  "websocket_ip" : "127.0.0.1",
  "websocket_port" : 5006,
  "websocket_ssl" : false,
  "local_signing" : true
});

var tx_json = {
  "Account" : v.addr,
  "Amount" : "10000000",
  "Destination" : "rEu2ULPiEQm1BAL8pYzmXnNX1aFX9sCks",
  "Fee" : "10",
  "Flags" : 0,
  "Sequence" : 3,
  "TransactionType" : "Payment"

  //"SigningPubKey": '0396941B22791A448E5877A44CE98434DB217D6FB97D63F0DAD23BE49ED45173C9'
};

remote.on('connected', function () {
  var req = remote.request_sign(v.seed, tx_json);
  req.message.debug_signing = true;
  req.on('success', function (result) {
    console.log("SERVER RESULT");
    console.log(result);

    var sim = {};
    var tx = remote.transaction();
    tx.tx_json = tx_json;
    tx._secret = v.seed;
    tx.complete();
    var unsigned = tx.serialize().to_hex();
    tx.sign();

    sim.tx_blob = tx.serialize().to_hex();
    sim.tx_json = tx.tx_json;
    sim.tx_signing_hash = tx.signing_hash().to_hex();
    sim.tx_unsigned = unsigned;

    console.log("\nLOCAL RESULT");
    console.log(sim);

    remote.connect(false);
  });
  req.on('error', function (err) {
    if (err.error === "remoteError" && err.remote.error === "srcActNotFound") {
      console.log("Please fund account "+v.addr+" to run this test.");
    } else {
      console.log('error', err);
    }
    remote.connect(false);
  });
  req.request();

});
remote.connect();
@@ -1,18 +0,0 @@

#!/usr/bin/node
//
// Returns a Gravatar style hash as per: http://en.gravatar.com/site/implement/hash/
//

if (3 != process.argv.length) {
  process.stderr.write("Usage: " + process.argv[1] + " email_address\n\nReturns gravatar style hash.\n");
  process.exit(1);

} else {
  var md5 = require('crypto').createHash('md5');

  md5.update(process.argv[2].trim().toLowerCase());

  process.stdout.write(md5.digest('hex') + "\n");
}

// vim:sw=2:sts=2:ts=8:et
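The same digest can be reproduced from a shell when sanity-checking the script; the address below is a made-up example, and the input is assumed to be already trimmed and lowercased:

```bash
# Gravatar hash = md5 of the trimmed, lowercased email address.
printf '%s' 'user@example.com' | md5sum | cut -d ' ' -f 1
```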
@@ -1,31 +0,0 @@

#!/usr/bin/node
//
// This program allows IE 9 ripple-clients to make websocket connections to
// rippled using flash. As IE 9 does not have websocket support, this is
// required if you wish to support IE 9 ripple-clients.
//
// http://www.lightsphere.com/dev/articles/flash_socket_policy.html
//
// For better security, be sure to set the Port below to the port of your
// [websocket_public_port].
//

var net = require("net"),
  port = "*",
  domains = ["*:"+port]; // Domain:Port

net.createServer(
  function(socket) {
    socket.write("<?xml version='1.0' ?>\n");
    socket.write("<!DOCTYPE cross-domain-policy SYSTEM 'http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd'>\n");
    socket.write("<cross-domain-policy>\n");
    domains.forEach(
      function(domain) {
        var parts = domain.split(':');
        socket.write("\t<allow-access-from domain='" + parts[0] + "' to-ports='" + parts[1] + "' />\n");
      }
    );
    socket.write("</cross-domain-policy>\n");
    socket.end();
  }
).listen(843);
Some files were not shown because too many files have changed in this diff.