Mirror of https://github.com/XRPLF/rippled.git (synced 2025-12-06 17:27:55 +00:00)

Merge branch 'develop' into ximinez/fixed-ledger-entries

.github/workflows/reusable-build.yml (vendored, 1 line changed)
@@ -48,6 +48,7 @@ jobs:
     name: Build ${{ inputs.config_name }}
     runs-on: ${{ fromJSON(inputs.runs_on) }}
     container: ${{ inputs.image != '' && inputs.image || null }}
+    timeout-minutes: 60
     steps:
       - name: Cleanup workspace
         if: ${{ runner.os == 'macOS' }}
.github/workflows/reusable-test.yml (vendored, 1 line changed)

@@ -31,6 +31,7 @@ jobs:
     name: Test ${{ inputs.config_name }}
     runs-on: ${{ fromJSON(inputs.runs_on) }}
     container: ${{ inputs.image != '' && inputs.image || null }}
+    timeout-minutes: 30
     steps:
       - name: Download rippled artifact
         uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
@@ -975,6 +975,47 @@
 #       number of ledger records online. Must be greater
 #       than or equal to ledger_history.
 #
+# Optional keys for NuDB only:
+#
+# nudb_block_size   EXPERIMENTAL: Block size in bytes for NuDB storage.
+#       Must be a power of 2 between 4096 and 32768. Default is 4096.
+#
+#       This parameter controls the fundamental storage unit
+#       size for NuDB's internal data structures. The choice
+#       of block size can significantly impact performance
+#       depending on your storage hardware and filesystem:
+#
+#       - 4096 bytes: Optimal for most standard SSDs and
+#         traditional filesystems (ext4, NTFS, HFS+).
+#         Provides good balance of performance and storage
+#         efficiency. Recommended for most deployments.
+#         Minimizes memory footprint and provides consistent
+#         low-latency access patterns across diverse hardware.
+#
+#       - 8192-16384 bytes: May improve performance on
+#         high-end NVMe SSDs and copy-on-write filesystems
+#         like ZFS or Btrfs that benefit from larger block
+#         alignment. Can reduce metadata overhead for large
+#         databases. Offers better sequential throughput and
+#         reduced I/O operations at the cost of higher memory
+#         usage per operation.
+#
+#       - 32768 bytes (32K): Maximum supported block size
+#         for high-performance scenarios with very fast
+#         storage. May increase memory usage and reduce
+#         efficiency for smaller databases. Best suited for
+#         enterprise environments with abundant RAM.
+#
+#       Performance testing is recommended before deploying
+#       any non-default block size in production environments.
+#
+#       Note: This setting cannot be changed after database
+#       creation without rebuilding the entire database.
+#       Choose carefully based on your hardware and expected
+#       database size.
+#
+#       Example: nudb_block_size=4096
+#
 # These keys modify the behavior of online_delete, and thus are only
 # relevant if online_delete is defined and non-zero:
 #
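For reference, the documented constraint (a power of two between 4096 and 32768, defaulting to 4096) can be checked with a standard bit trick. The sketch below is illustrative only; the helper name and exact structure are assumptions, not the code this commit adds to the NuDB backend, although the error wording mirrors the messages exercised by the new unit tests.

#include <cstddef>
#include <stdexcept>
#include <string>

// Hypothetical helper: return the block size if it satisfies the documented
// rule, otherwise throw. A value is a power of two iff exactly one bit is set.
std::size_t
validateNudbBlockSize(std::size_t requested)
{
    constexpr std::size_t minSize = 4096;
    constexpr std::size_t maxSize = 32768;
    bool const powerOfTwo =
        requested != 0 && (requested & (requested - 1)) == 0;
    if (requested < minSize || requested > maxSize || !powerOfTwo)
        throw std::runtime_error(
            "Invalid nudb_block_size: " + std::to_string(requested) +
            ". Must be power of 2 between 4096 and 32768");
    return requested;  // e.g. validateNudbBlockSize(8192) == 8192
}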
@@ -1471,6 +1512,7 @@ secure_gateway = 127.0.0.1
 [node_db]
 type=NuDB
 path=/var/lib/rippled/db/nudb
+nudb_block_size=4096
 online_delete=512
 advisory_delete=0
@@ -909,7 +909,7 @@ TRANSACTION(ttVAULT_DEPOSIT, 68, VaultDeposit,
 TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw,
     Delegation::delegatable,
     featureSingleAssetVault,
-    mayDeleteMPT | mustModifyVault,
+    mayDeleteMPT | mayAuthorizeMPT | mustModifyVault,
     ({
         {sfVaultID, soeREQUIRED},
         {sfAmount, soeREQUIRED, soeMPTSupported},
@@ -1242,6 +1242,12 @@ addEmptyHolding(
     // If the line already exists, don't create it again.
     if (view.read(index))
         return tecDUPLICATE;

+    // Can the account cover the trust line reserve ?
+    std::uint32_t const ownerCount = sleDst->at(sfOwnerCount);
+    if (priorBalance < view.fees().accountReserve(ownerCount + 1))
+        return tecNO_LINE_INSUF_RESERVE;
+
     return trustCreate(
         view,
         high,
@@ -59,14 +59,15 @@ class Vault_test : public beast::unit_test::suite
     testSequences()
     {
         using namespace test::jtx;
+        Account issuer{"issuer"};
+        Account owner{"owner"};
+        Account depositor{"depositor"};
+        Account charlie{"charlie"};  // authorized 3rd party
+        Account dave{"dave"};
+
-        auto const testSequence = [this](
+        auto const testSequence = [&, this](
                                       std::string const& prefix,
                                       Env& env,
-                                      Account const& issuer,
-                                      Account const& owner,
-                                      Account const& depositor,
-                                      Account const& charlie,
                                       Vault& vault,
                                       PrettyAsset const& asset) {
             auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
@@ -104,11 +105,9 @@ class Vault_test : public beast::unit_test::suite

             // Several 3rd party accounts which cannot receive funds
             Account alice{"alice"};
-            Account dave{"dave"};
             Account erin{"erin"};  // not authorized by issuer
-            env.fund(XRP(1000), alice, dave, erin);
+            env.fund(XRP(1000), alice, erin);
             env(fset(alice, asfDepositAuth));
-            env(fset(dave, asfRequireDest));
             env.close();

             {
@@ -328,19 +327,6 @@ class Vault_test : public beast::unit_test::suite
                 env.close();
             }

-            {
-                testcase(
-                    prefix +
-                    " fail to withdraw with tag but without destination");
-                auto tx = vault.withdraw(
-                    {.depositor = depositor,
-                     .id = keylet.key,
-                     .amount = asset(1000)});
-                tx[sfDestinationTag] = "0";
-                env(tx, ter(temMALFORMED));
-                env.close();
-            }
-
             if (!asset.raw().native())
             {
                 testcase(
@@ -368,12 +354,49 @@ class Vault_test : public beast::unit_test::suite
                 env.close();
             }

+            {
+                testcase(prefix + " withdraw to 3rd party lsfRequireDestTag");
+                auto tx = vault.withdraw(
+                    {.depositor = depositor,
+                     .id = keylet.key,
+                     .amount = asset(50)});
+                tx[sfDestination] = dave.human();
+                tx[sfDestinationTag] = "0";
+                env(tx);
+                env.close();
+            }
+
+            {
+                testcase(prefix + " deposit again");
+                auto tx = vault.deposit(
+                    {.depositor = dave, .id = keylet.key, .amount = asset(50)});
+                env(tx);
+                env.close();
+            }
+
+            {
+                testcase(prefix + " fail to withdraw lsfRequireDestTag");
+                auto tx = vault.withdraw(
+                    {.depositor = dave, .id = keylet.key, .amount = asset(50)});
+                env(tx, ter{tecDST_TAG_NEEDED});
+                env.close();
+            }
+
+            {
+                testcase(prefix + " withdraw with tag");
+                auto tx = vault.withdraw(
+                    {.depositor = dave, .id = keylet.key, .amount = asset(50)});
+                tx[sfDestinationTag] = "0";
+                env(tx);
+                env.close();
+            }
+
             {
                 testcase(prefix + " withdraw to authorized 3rd party");
                 auto tx = vault.withdraw(
                     {.depositor = depositor,
                      .id = keylet.key,
-                     .amount = asset(100)});
+                     .amount = asset(50)});
                 tx[sfDestination] = charlie.human();
                 env(tx);
                 env.close();
@@ -523,76 +546,52 @@ class Vault_test : public beast::unit_test::suite
             }
         };

-        auto testCases = [this, &testSequence](
+        auto testCases = [&, this](
                              std::string prefix,
-                             std::function<PrettyAsset(
-                                 Env & env,
-                                 Account const& issuer,
-                                 Account const& owner,
-                                 Account const& depositor,
-                                 Account const& charlie)> setup) {
+                             std::function<PrettyAsset(Env & env)> setup) {
            Env env{*this, testable_amendments() | featureSingleAssetVault};
-           Account issuer{"issuer"};
-           Account owner{"owner"};
-           Account depositor{"depositor"};
-           Account charlie{"charlie"};  // authorized 3rd party
            Vault vault{env};
-           env.fund(XRP(1000), issuer, owner, depositor, charlie);
+           env.fund(XRP(1000), issuer, owner, depositor, charlie, dave);
            env.close();
            env(fset(issuer, asfAllowTrustLineClawback));
            env(fset(issuer, asfRequireAuth));
+           env(fset(dave, asfRequireDest));
            env.close();
            env.require(flags(issuer, asfAllowTrustLineClawback));
            env.require(flags(issuer, asfRequireAuth));

-           PrettyAsset asset = setup(env, issuer, owner, depositor, charlie);
-           testSequence(
-               prefix, env, issuer, owner, depositor, charlie, vault, asset);
+           PrettyAsset asset = setup(env);
+           testSequence(prefix, env, vault, asset);
        };

-       testCases(
-           "XRP",
-           [](Env& env,
-              Account const& issuer,
-              Account const& owner,
-              Account const& depositor,
-              Account const& charlie) -> PrettyAsset {
+       testCases("XRP", [&](Env& env) -> PrettyAsset {
            return {xrpIssue(), 1'000'000};
        });

-       testCases(
-           "IOU",
-           [](Env& env,
-              Account const& issuer,
-              Account const& owner,
-              Account const& depositor,
-              Account const& charlie) -> Asset {
+       testCases("IOU", [&](Env& env) -> Asset {
            PrettyAsset asset = issuer["IOU"];
            env(trust(owner, asset(1000)));
            env(trust(depositor, asset(1000)));
            env(trust(charlie, asset(1000)));
+           env(trust(dave, asset(1000)));
            env(trust(issuer, asset(0), owner, tfSetfAuth));
            env(trust(issuer, asset(0), depositor, tfSetfAuth));
            env(trust(issuer, asset(0), charlie, tfSetfAuth));
+           env(trust(issuer, asset(0), dave, tfSetfAuth));
            env(pay(issuer, depositor, asset(1000)));
            env.close();
            return asset;
        });

-       testCases(
-           "MPT",
-           [](Env& env,
-              Account const& issuer,
-              Account const& owner,
-              Account const& depositor,
-              Account const& charlie) -> Asset {
+       testCases("MPT", [&](Env& env) -> Asset {
            MPTTester mptt{env, issuer, mptInitNoFund};
            mptt.create(
-               {.flags =
-                    tfMPTCanClawback | tfMPTCanTransfer | tfMPTCanLock});
+               {.flags = tfMPTCanClawback | tfMPTCanTransfer | tfMPTCanLock});
            PrettyAsset asset = mptt.issuanceID();
            mptt.authorize({.account = depositor});
            mptt.authorize({.account = charlie});
+           mptt.authorize({.account = dave});
            env(pay(issuer, depositor, asset(1000)));
            env.close();
            return asset;
@@ -1672,6 +1671,7 @@ class Vault_test : public beast::unit_test::suite
        {
            bool enableClawback = true;
            bool requireAuth = true;
+           int initialXRP = 1000;
        };

        auto testCase = [this](
@@ -1688,7 +1688,7 @@ class Vault_test : public beast::unit_test::suite
            Account issuer{"issuer"};
            Account owner{"owner"};
            Account depositor{"depositor"};
-           env.fund(XRP(1000), issuer, owner, depositor);
+           env.fund(XRP(args.initialXRP), issuer, owner, depositor);
            env.close();
            Vault vault{env};
@@ -1868,9 +1868,7 @@ class Vault_test : public beast::unit_test::suite
                PrettyAsset const& asset,
                Vault& vault,
                MPTTester& mptt) {
-               testcase(
-                   "MPT 3rd party without MPToken cannot be withdrawal "
-                   "destination");
+               testcase("MPT depositor without MPToken, auth required");

                auto [tx, keylet] =
                    vault.create({.owner = owner, .asset = asset});
@@ -1880,10 +1878,32 @@ class Vault_test : public beast::unit_test::suite
                tx = vault.deposit(
                    {.depositor = depositor,
                     .id = keylet.key,
-                    .amount = asset(100)});
+                    .amount = asset(1000)});
                env(tx);
                env.close();

+               {
+                   // Remove depositor MPToken and it will not be re-created
+                   mptt.authorize(
+                       {.account = depositor, .flags = tfMPTUnauthorize});
+                   env.close();
+
+                   auto const mptoken =
+                       keylet::mptoken(mptt.issuanceID(), depositor);
+                   auto const sleMPT1 = env.le(mptoken);
+                   BEAST_EXPECT(sleMPT1 == nullptr);
+
+                   tx = vault.withdraw(
+                       {.depositor = depositor,
+                        .id = keylet.key,
+                        .amount = asset(100)});
+                   env(tx, ter{tecNO_AUTH});
+                   env.close();
+
+                   auto const sleMPT2 = env.le(mptoken);
+                   BEAST_EXPECT(sleMPT2 == nullptr);
+               }
+
                {
                    // Set destination to 3rd party without MPToken
                    Account charlie{"charlie"};
@@ -1898,7 +1918,7 @@ class Vault_test : public beast::unit_test::suite
                    env(tx, ter(tecNO_AUTH));
                }
            },
-           {.requireAuth = false});
+           {.requireAuth = true});

        testCase(
            [this](
@@ -1909,7 +1929,7 @@ class Vault_test : public beast::unit_test::suite
                PrettyAsset const& asset,
                Vault& vault,
                MPTTester& mptt) {
-               testcase("MPT depositor without MPToken cannot withdraw");
+               testcase("MPT depositor without MPToken, no auth required");

                auto [tx, keylet] =
                    vault.create({.owner = owner, .asset = asset});
@@ -1917,7 +1937,6 @@ class Vault_test : public beast::unit_test::suite
                env.close();
                auto v = env.le(keylet);
                BEAST_EXPECT(v);
-               MPTID share = (*v)[sfShareMPTID];

                tx = vault.deposit(
                    {.depositor = depositor,
@@ -1927,41 +1946,120 @@ class Vault_test : public beast::unit_test::suite
                env.close();

                {
-                   // Remove depositor's MPToken and withdraw will fail
+                   // Remove depositor's MPToken and it will be re-created
                    mptt.authorize(
                        {.account = depositor, .flags = tfMPTUnauthorize});
                    env.close();

                    auto const mptoken =
-                       env.le(keylet::mptoken(mptt.issuanceID(), depositor));
-                   BEAST_EXPECT(mptoken == nullptr);
+                       keylet::mptoken(mptt.issuanceID(), depositor);
+                   auto const sleMPT1 = env.le(mptoken);
+                   BEAST_EXPECT(sleMPT1 == nullptr);

                    tx = vault.withdraw(
                        {.depositor = depositor,
                         .id = keylet.key,
                         .amount = asset(100)});
-                   env(tx, ter(tecNO_AUTH));
+                   env(tx);
+                   env.close();
+
+                   auto const sleMPT2 = env.le(mptoken);
+                   BEAST_EXPECT(sleMPT2 != nullptr);
+                   BEAST_EXPECT(sleMPT2->at(sfMPTAmount) == 100);
                }

                {
-                   // Restore depositor's MPToken and withdraw will succeed
-                   mptt.authorize({.account = depositor});
+                   // Remove 3rd party MPToken and it will not be re-created
+                   mptt.authorize(
+                       {.account = owner, .flags = tfMPTUnauthorize});
                    env.close();
+
+                   auto const mptoken =
+                       keylet::mptoken(mptt.issuanceID(), owner);
+                   auto const sleMPT1 = env.le(mptoken);
+                   BEAST_EXPECT(sleMPT1 == nullptr);

                    tx = vault.withdraw(
                        {.depositor = depositor,
                         .id = keylet.key,
-                        .amount = asset(1000)});
-                   env(tx);
+                        .amount = asset(100)});
+                   tx[sfDestination] = owner.human();
+                   env(tx, ter(tecNO_AUTH));
                    env.close();

-                   // Withdraw removed shares MPToken
-                   auto const mptSle =
-                       env.le(keylet::mptoken(share, depositor.id()));
-                   BEAST_EXPECT(mptSle == nullptr);
+                   auto const sleMPT2 = env.le(mptoken);
+                   BEAST_EXPECT(sleMPT2 == nullptr);
                }
            },
            {.requireAuth = false});

+       auto const [acctReserve, incReserve] = [this]() -> std::pair<int, int> {
+           Env env{*this, testable_amendments()};
+           return {
+               env.current()->fees().accountReserve(0).drops() /
+                   DROPS_PER_XRP.drops(),
+               env.current()->fees().increment.drops() /
+                   DROPS_PER_XRP.drops()};
+       }();
+
+       testCase(
+           [&, this](
+               Env& env,
+               Account const& issuer,
+               Account const& owner,
+               Account const& depositor,
+               PrettyAsset const& asset,
+               Vault& vault,
+               MPTTester& mptt) {
+               testcase("MPT failed reserve to re-create MPToken");
+
+               auto [tx, keylet] =
+                   vault.create({.owner = owner, .asset = asset});
+               env(tx);
+               env.close();
+               auto v = env.le(keylet);
+               BEAST_EXPECT(v);
+
+               env(pay(depositor, owner, asset(1000)));
+               env.close();
+
+               tx = vault.deposit(
+                   {.depositor = owner,
+                    .id = keylet.key,
+                    .amount = asset(1000)});  // all assets held by owner
+               env(tx);
+               env.close();
+
+               {
+                   // Remove owners's MPToken and it will not be re-created
+                   mptt.authorize(
+                       {.account = owner, .flags = tfMPTUnauthorize});
+                   env.close();
+
+                   auto const mptoken =
+                       keylet::mptoken(mptt.issuanceID(), owner);
+                   auto const sleMPT = env.le(mptoken);
+                   BEAST_EXPECT(sleMPT == nullptr);
+
+                   // No reserve to create MPToken for asset in VaultWithdraw
+                   tx = vault.withdraw(
+                       {.depositor = owner,
+                        .id = keylet.key,
+                        .amount = asset(100)});
+                   env(tx, ter{tecINSUFFICIENT_RESERVE});
+                   env.close();
+
+                   env(pay(depositor, owner, XRP(incReserve)));
+                   env.close();
+
+                   // Withdraw can now create asset MPToken, tx will succeed
+                   env(tx);
+                   env.close();
+               }
+           },
+           {.requireAuth = false,
+            .initialXRP = acctReserve + incReserve * 4 - 1});
+
        testCase([this](
            Env& env,
            Account const& issuer,
@@ -2320,9 +2418,15 @@ class Vault_test : public beast::unit_test::suite
    {
        using namespace test::jtx;

+       struct CaseArgs
+       {
+           int initialXRP = 1000;
+           double transferRate = 1.0;
+       };
+
        auto testCase =
-           [&,
-            this](std::function<void(
+           [&, this](
+               std::function<void(
                   Env & env,
                   Account const& owner,
                   Account const& issuer,
@@ -2330,13 +2434,14 @@ class Vault_test : public beast::unit_test::suite
                std::function<Account(ripple::Keylet)> vaultAccount,
                Vault& vault,
                PrettyAsset const& asset,
-               std::function<MPTID(ripple::Keylet)> issuanceId)> test) {
+               std::function<MPTID(ripple::Keylet)> issuanceId)> test,
+               CaseArgs args = {}) {
               Env env{*this, testable_amendments() | featureSingleAssetVault};
               Account const owner{"owner"};
               Account const issuer{"issuer"};
               Account const charlie{"charlie"};
               Vault vault{env};
-              env.fund(XRP(1000), issuer, owner, charlie);
+              env.fund(XRP(args.initialXRP), issuer, owner, charlie);
               env(fset(issuer, asfAllowTrustLineClawback));
               env.close();
@@ -2344,7 +2449,7 @@ class Vault_test : public beast::unit_test::suite
            env.trust(asset(1000), owner);
            env.trust(asset(1000), charlie);
            env(pay(issuer, owner, asset(200)));
-           env(rate(issuer, 1.25));
+           env(rate(issuer, args.transferRate));
            env.close();

            auto const vaultAccount =
@@ -2505,7 +2610,8 @@ class Vault_test : public beast::unit_test::suite
            env.close();
        });

-       testCase([&, this](
+       testCase(
+           [&, this](
                Env& env,
                Account const& owner,
                Account const& issuer,
@@ -2516,12 +2622,15 @@ class Vault_test : public beast::unit_test::suite
                auto issuanceId) {
                testcase("IOU transfer fees not applied");

-               auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
+               auto [tx, keylet] =
+                   vault.create({.owner = owner, .asset = asset});
                env(tx);
                env.close();

                env(vault.deposit(
-                   {.depositor = owner, .id = keylet.key, .amount = asset(100)}));
+                   {.depositor = owner,
+                    .id = keylet.key,
+                    .amount = asset(100)}));
                env.close();

                auto const issue = asset.raw().get<Issue>();
@@ -2544,7 +2653,8 @@ class Vault_test : public beast::unit_test::suite

                // transfer fees ignored on clawback
                BEAST_EXPECT(env.balance(owner, issue) == asset(100));
-               BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(50));
+               BEAST_EXPECT(
+                   env.balance(vaultAccount(keylet), issue) == asset(50));

                env(vault.withdraw(
                    {.depositor = owner,
@@ -2553,7 +2663,8 @@ class Vault_test : public beast::unit_test::suite

                // transfer fees ignored on withdraw
                BEAST_EXPECT(env.balance(owner, issue) == asset(120));
-               BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(30));
+               BEAST_EXPECT(
+                   env.balance(vaultAccount(keylet), issue) == asset(30));

                {
                    auto tx = vault.withdraw(
@@ -2567,11 +2678,13 @@ class Vault_test : public beast::unit_test::suite
                // transfer fees ignored on withdraw to 3rd party
                BEAST_EXPECT(env.balance(owner, issue) == asset(120));
                BEAST_EXPECT(env.balance(charlie, issue) == asset(30));
-               BEAST_EXPECT(env.balance(vaultAccount(keylet), issue) == asset(0));
+               BEAST_EXPECT(
+                   env.balance(vaultAccount(keylet), issue) == asset(0));

                env(vault.del({.owner = owner, .id = keylet.key}));
                env.close();
-           });
+           },
+           CaseArgs{.transferRate = 1.25});

        testCase([&, this](
                Env& env,
@@ -2713,6 +2826,103 @@ class Vault_test : public beast::unit_test::suite
            env(tx1);
        });

+       auto const [acctReserve, incReserve] = [this]() -> std::pair<int, int> {
+           Env env{*this, testable_amendments()};
+           return {
+               env.current()->fees().accountReserve(0).drops() /
+                   DROPS_PER_XRP.drops(),
+               env.current()->fees().increment.drops() /
+                   DROPS_PER_XRP.drops()};
+       }();
+
+       testCase(
+           [&, this](
+               Env& env,
+               Account const& owner,
+               Account const& issuer,
+               Account const& charlie,
+               auto,
+               Vault& vault,
+               PrettyAsset const& asset,
+               auto&&...) {
+               testcase("IOU no trust line to depositor no reserve");
+               auto [tx, keylet] =
+                   vault.create({.owner = owner, .asset = asset});
+               env(tx);
+               env.close();
+
+               // reset limit, so deposit of all funds will delete the trust
+               // line
+               env.trust(asset(0), owner);
+               env.close();
+
+               env(vault.deposit(
+                   {.depositor = owner,
+                    .id = keylet.key,
+                    .amount = asset(200)}));
+               env.close();
+
+               auto trustline =
+                   env.le(keylet::line(owner, asset.raw().get<Issue>()));
+               BEAST_EXPECT(trustline == nullptr);
+
+               // Fail because not enough reserve to create trust line
+               tx = vault.withdraw(
+                   {.depositor = owner,
+                    .id = keylet.key,
+                    .amount = asset(10)});
+               env(tx, ter{tecNO_LINE_INSUF_RESERVE});
+               env.close();
+
+               env(pay(charlie, owner, XRP(incReserve)));
+               env.close();
+
+               // Withdraw can now create trust line, will succeed
+               env(tx);
+               env.close();
+           },
+           CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
+
+       testCase(
+           [&, this](
+               Env& env,
+               Account const& owner,
+               Account const& issuer,
+               Account const& charlie,
+               auto,
+               Vault& vault,
+               PrettyAsset const& asset,
+               auto&&...) {
+               testcase("IOU no reserve for share MPToken");
+               auto [tx, keylet] =
+                   vault.create({.owner = owner, .asset = asset});
+               env(tx);
+               env.close();
+
+               env(pay(owner, charlie, asset(100)));
+               env.close();
+
+               // Use up some reserve on tickets
+               env(ticket::create(charlie, 2));
+               env.close();
+
+               // Fail because not enough reserve to create MPToken for shares
+               tx = vault.deposit(
+                   {.depositor = charlie,
+                    .id = keylet.key,
+                    .amount = asset(100)});
+               env(tx, ter{tecINSUFFICIENT_RESERVE});
+               env.close();
+
+               env(pay(issuer, charlie, XRP(incReserve)));
+               env.close();
+
+               // Deposit can now create MPToken, will succeed
+               env(tx);
+               env.close();
+           },
+           CaseArgs{.initialXRP = acctReserve + incReserve * 4 - 1});
+
        testCase([&, this](
                Env& env,
                Account const& owner,
src/test/nodestore/NuDBFactory_test.cpp (new file, 478 lines)

@@ -0,0 +1,478 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2012, 2013 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <test/nodestore/TestBase.h>
#include <test/unit_test/SuiteJournal.h>

#include <xrpld/nodestore/DummyScheduler.h>
#include <xrpld/nodestore/Manager.h>

#include <xrpl/basics/BasicConfig.h>
#include <xrpl/basics/ByteUtilities.h>
#include <xrpl/beast/utility/temp_dir.h>

#include <memory>
#include <sstream>

namespace ripple {
namespace NodeStore {

class NuDBFactory_test : public TestBase
{
private:
    // Helper function to create a Section with specified parameters
    Section
    createSection(std::string const& path, std::string const& blockSize = "")
    {
        Section params;
        params.set("type", "nudb");
        params.set("path", path);
        if (!blockSize.empty())
            params.set("nudb_block_size", blockSize);
        return params;
    }

    // Helper function to create a backend and test basic functionality
    bool
    testBackendFunctionality(
        Section const& params,
        std::size_t expectedBlocksize)
    {
        try
        {
            DummyScheduler scheduler;
            test::SuiteJournal journal("NuDBFactory_test", *this);

            auto backend = Manager::instance().make_Backend(
                params, megabytes(4), scheduler, journal);

            if (!BEAST_EXPECT(backend))
                return false;

            if (!BEAST_EXPECT(backend->getBlockSize() == expectedBlocksize))
                return false;

            backend->open();

            if (!BEAST_EXPECT(backend->isOpen()))
                return false;

            // Test basic store/fetch functionality
            auto batch = createPredictableBatch(10, 12345);
            storeBatch(*backend, batch);

            Batch copy;
            fetchCopyOfBatch(*backend, &copy, batch);

            backend->close();

            return areBatchesEqual(batch, copy);
        }
        catch (...)
        {
            return false;
        }
    }

    // Helper function to test log messages
    void
    testLogMessage(
        Section const& params,
        beast::severities::Severity level,
        std::string const& expectedMessage)
    {
        test::StreamSink sink(level);
        beast::Journal journal(sink);

        DummyScheduler scheduler;
        auto backend = Manager::instance().make_Backend(
            params, megabytes(4), scheduler, journal);

        std::string logOutput = sink.messages().str();
        BEAST_EXPECT(logOutput.find(expectedMessage) != std::string::npos);
    }

    // Helper function to test power of two validation
    void
    testPowerOfTwoValidation(std::string const& size, bool shouldWork)
    {
        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path(), size);

        test::StreamSink sink(beast::severities::kWarning);
        beast::Journal journal(sink);

        DummyScheduler scheduler;
        auto backend = Manager::instance().make_Backend(
            params, megabytes(4), scheduler, journal);

        std::string logOutput = sink.messages().str();
        bool hasWarning =
            logOutput.find("Invalid nudb_block_size") != std::string::npos;

        BEAST_EXPECT(hasWarning == !shouldWork);
    }

public:
    void
    testDefaultBlockSize()
    {
        testcase("Default block size (no nudb_block_size specified)");

        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path());

        // Should work with default 4096 block size
        BEAST_EXPECT(testBackendFunctionality(params, 4096));
    }

    void
    testValidBlockSizes()
    {
        testcase("Valid block sizes");

        std::vector<std::size_t> validSizes = {4096, 8192, 16384, 32768};

        for (auto const& size : validSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), to_string(size));

            BEAST_EXPECT(testBackendFunctionality(params, size));
        }
        // Empty value is ignored by the config parser, so uses the
        // default
        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path(), "");

        BEAST_EXPECT(testBackendFunctionality(params, 4096));
    }

    void
    testInvalidBlockSizes()
    {
        testcase("Invalid block sizes");

        std::vector<std::string> invalidSizes = {
            "2048",    // Too small
            "1024",    // Too small
            "65536",   // Too large
            "131072",  // Too large
            "5000",    // Not power of 2
            "6000",    // Not power of 2
            "10000",   // Not power of 2
            "0",       // Zero
            "-1",      // Negative
            "abc",     // Non-numeric
            "4k",      // Invalid format
            "4096.5"   // Decimal
        };

        for (auto const& size : invalidSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            // Fails
            BEAST_EXPECT(!testBackendFunctionality(params, 4096));
        }

        // Test whitespace cases separately since lexical_cast may handle them
        std::vector<std::string> whitespaceInvalidSizes = {
            "4096 ",  // Trailing space - might be handled by lexical_cast
            " 4096"   // Leading space - might be handled by lexical_cast
        };

        for (auto const& size : whitespaceInvalidSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            // Fails
            BEAST_EXPECT(!testBackendFunctionality(params, 4096));
        }
    }

    void
    testLogMessages()
    {
        testcase("Log message verification");

        // Test valid custom block size logging
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), "8192");

            testLogMessage(
                params,
                beast::severities::kInfo,
                "Using custom NuDB block size: 8192");
        }

        // Test invalid block size failure
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), "5000");

            test::StreamSink sink(beast::severities::kWarning);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                fail();
            }
            catch (std::exception const& e)
            {
                std::string logOutput{e.what()};
                BEAST_EXPECT(
                    logOutput.find("Invalid nudb_block_size: 5000") !=
                    std::string::npos);
                BEAST_EXPECT(
                    logOutput.find(
                        "Must be power of 2 between 4096 and 32768") !=
                    std::string::npos);
            }
        }

        // Test non-numeric value failure
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), "invalid");

            test::StreamSink sink(beast::severities::kWarning);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);

                fail();
            }
            catch (std::exception const& e)
            {
                std::string logOutput{e.what()};
                BEAST_EXPECT(
                    logOutput.find("Invalid nudb_block_size value: invalid") !=
                    std::string::npos);
            }
        }
    }

    void
    testPowerOfTwoValidation()
    {
        testcase("Power of 2 validation logic");

        // Test edge cases around valid range
        std::vector<std::pair<std::string, bool>> testCases = {
            {"4095", false},   // Just below minimum
            {"4096", true},    // Minimum valid
            {"4097", false},   // Just above minimum, not power of 2
            {"8192", true},    // Valid power of 2
            {"8193", false},   // Just above valid power of 2
            {"16384", true},   // Valid power of 2
            {"32768", true},   // Maximum valid
            {"32769", false},  // Just above maximum
            {"65536", false}   // Power of 2 but too large
        };

        for (auto const& [size, shouldWork] : testCases)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            // We test the validation logic by catching exceptions for invalid
            // values
            test::StreamSink sink(beast::severities::kWarning);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                BEAST_EXPECT(shouldWork);
            }
            catch (std::exception const& e)
            {
                std::string logOutput{e.what()};
                BEAST_EXPECT(
                    logOutput.find("Invalid nudb_block_size") !=
                    std::string::npos);
            }
        }
    }

    void
    testBothConstructorVariants()
    {
        testcase("Both constructor variants work with custom block size");

        beast::temp_dir tempDir;
        auto params = createSection(tempDir.path(), "16384");

        DummyScheduler scheduler;
        test::SuiteJournal journal("NuDBFactory_test", *this);

        // Test first constructor (without nudb::context)
        {
            auto backend1 = Manager::instance().make_Backend(
                params, megabytes(4), scheduler, journal);
            BEAST_EXPECT(backend1 != nullptr);
            BEAST_EXPECT(testBackendFunctionality(params, 16384));
        }

        // Test second constructor (with nudb::context)
        // Note: This would require access to nudb::context, which might not be
        // easily testable without more complex setup. For now, we test that
        // the factory can create backends with the first constructor.
    }

    void
    testConfigurationParsing()
    {
        testcase("Configuration parsing edge cases");

        // Test that whitespace is handled correctly
        std::vector<std::string> validFormats = {
            "8192"  // Basic valid format
        };

        // Test whitespace handling separately since lexical_cast behavior may
        // vary
        std::vector<std::string> whitespaceFormats = {
            " 8192",  // Leading space - may or may not be handled by
                      // lexical_cast
            "8192 "   // Trailing space - may or may not be handled by
                      // lexical_cast
        };

        // Test basic valid format
        for (auto const& format : validFormats)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), format);

            test::StreamSink sink(beast::severities::kInfo);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            auto backend = Manager::instance().make_Backend(
                params, megabytes(4), scheduler, journal);

            // Should log success message for valid values
            std::string logOutput = sink.messages().str();
            bool hasSuccessMessage =
                logOutput.find("Using custom NuDB block size") !=
                std::string::npos;
            BEAST_EXPECT(hasSuccessMessage);
        }

        // Test whitespace formats - these should work if lexical_cast handles
        // them
        for (auto const& format : whitespaceFormats)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), format);

            // Use a lower threshold to capture both info and warning messages
            test::StreamSink sink(beast::severities::kDebug);
            beast::Journal journal(sink);

            DummyScheduler scheduler;
            try
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                fail();
            }
            catch (...)
            {
                // Fails
                BEAST_EXPECT(!testBackendFunctionality(params, 8192));
            }
        }
    }

    void
    testDataPersistence()
    {
        testcase("Data persistence with different block sizes");

        std::vector<std::string> blockSizes = {
            "4096", "8192", "16384", "32768"};

        for (auto const& size : blockSizes)
        {
            beast::temp_dir tempDir;
            auto params = createSection(tempDir.path(), size);

            DummyScheduler scheduler;
            test::SuiteJournal journal("NuDBFactory_test", *this);

            // Create test data
            auto batch = createPredictableBatch(50, 54321);

            // Store data
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                backend->open();
                storeBatch(*backend, batch);
                backend->close();
            }

            // Retrieve data in new backend instance
            {
                auto backend = Manager::instance().make_Backend(
                    params, megabytes(4), scheduler, journal);
                backend->open();

                Batch copy;
                fetchCopyOfBatch(*backend, &copy, batch);

                BEAST_EXPECT(areBatchesEqual(batch, copy));
                backend->close();
            }
        }
    }

    void
    run() override
    {
        testDefaultBlockSize();
        testValidBlockSizes();
        testInvalidBlockSizes();
        testLogMessages();
        testPowerOfTwoValidation();
        testBothConstructorVariants();
        testConfigurationParsing();
        testDataPersistence();
    }
};

BEAST_DEFINE_TESTSUITE(NuDBFactory, ripple_core, ripple);

} // namespace NodeStore
} // namespace ripple
src/test/rpc/ServerDefinitions_test.cpp (new file, 168 lines)

@@ -0,0 +1,168 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <test/jtx.h>

#include <xrpl/beast/unit_test.h>
#include <xrpl/protocol/jss.h>

namespace ripple {

namespace test {

class ServerDefinitions_test : public beast::unit_test::suite
{
public:
    void
    testServerDefinitions()
    {
        testcase("server_definitions");

        using namespace test::jtx;

        {
            Env env(*this);
            auto const result = env.rpc("server_definitions");
            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
            BEAST_EXPECT(result[jss::result][jss::status] == "success");
            BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
            BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
            BEAST_EXPECT(
                result[jss::result].isMember(jss::TRANSACTION_RESULTS));
            BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::hash));

            // test a random element of each result
            // (testing the whole output would be difficult to maintain)

            {
                auto const firstField = result[jss::result][jss::FIELDS][0u];
                BEAST_EXPECT(firstField[0u].asString() == "Generic");
                BEAST_EXPECT(
                    firstField[1][jss::isSerialized].asBool() == false);
                BEAST_EXPECT(
                    firstField[1][jss::isSigningField].asBool() == false);
                BEAST_EXPECT(firstField[1][jss::isVLEncoded].asBool() == false);
                BEAST_EXPECT(firstField[1][jss::nth].asUInt() == 0);
                BEAST_EXPECT(firstField[1][jss::type].asString() == "Unknown");
            }

            BEAST_EXPECT(
                result[jss::result][jss::LEDGER_ENTRY_TYPES]["AccountRoot"]
                    .asUInt() == 97);
            BEAST_EXPECT(
                result[jss::result][jss::TRANSACTION_RESULTS]["tecDIR_FULL"]
                    .asUInt() == 121);
            BEAST_EXPECT(
                result[jss::result][jss::TRANSACTION_TYPES]["Payment"]
                    .asUInt() == 0);
            BEAST_EXPECT(
                result[jss::result][jss::TYPES]["AccountID"].asUInt() == 8);

            // check exception SFields
            {
                auto const fieldExists = [&](std::string name) {
                    for (auto& field : result[jss::result][jss::FIELDS])
                    {
                        if (field[0u].asString() == name)
                        {
                            return true;
                        }
                    }
                    return false;
                };
                BEAST_EXPECT(fieldExists("Generic"));
                BEAST_EXPECT(fieldExists("Invalid"));
                BEAST_EXPECT(fieldExists("ObjectEndMarker"));
                BEAST_EXPECT(fieldExists("ArrayEndMarker"));
                BEAST_EXPECT(fieldExists("taker_gets_funded"));
                BEAST_EXPECT(fieldExists("taker_pays_funded"));
                BEAST_EXPECT(fieldExists("hash"));
                BEAST_EXPECT(fieldExists("index"));
            }

            // test that base_uint types are replaced with "Hash" prefix
            {
                auto const types = result[jss::result][jss::TYPES];
                BEAST_EXPECT(types["Hash128"].asUInt() == 4);
                BEAST_EXPECT(types["Hash160"].asUInt() == 17);
                BEAST_EXPECT(types["Hash192"].asUInt() == 21);
                BEAST_EXPECT(types["Hash256"].asUInt() == 5);
                BEAST_EXPECT(types["Hash384"].asUInt() == 22);
                BEAST_EXPECT(types["Hash512"].asUInt() == 23);
            }
        }

        // test providing the same hash
        {
            Env env(*this);
            auto const firstResult = env.rpc("server_definitions");
            auto const hash = firstResult[jss::result][jss::hash].asString();
            auto const hashParam =
                std::string("{ ") + "\"hash\": \"" + hash + "\"}";

            auto const result =
                env.rpc("json", "server_definitions", hashParam);
            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
            BEAST_EXPECT(result[jss::result][jss::status] == "success");
            BEAST_EXPECT(!result[jss::result].isMember(jss::FIELDS));
            BEAST_EXPECT(
                !result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
            BEAST_EXPECT(
                !result[jss::result].isMember(jss::TRANSACTION_RESULTS));
            BEAST_EXPECT(!result[jss::result].isMember(jss::TRANSACTION_TYPES));
            BEAST_EXPECT(!result[jss::result].isMember(jss::TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
        }

        // test providing a different hash
        {
            Env env(*this);
            std::string const hash =
                "54296160385A27154BFA70A239DD8E8FD4CC2DB7BA32D970BA3A5B132CF749"
                "D1";
            auto const hashParam =
                std::string("{ ") + "\"hash\": \"" + hash + "\"}";

            auto const result =
                env.rpc("json", "server_definitions", hashParam);
            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
            BEAST_EXPECT(result[jss::result][jss::status] == "success");
            BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
            BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
            BEAST_EXPECT(
                result[jss::result].isMember(jss::TRANSACTION_RESULTS));
            BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
        }
    }

    void
    run() override
    {
        testServerDefinitions();
    }
};

BEAST_DEFINE_TESTSUITE(ServerDefinitions, rpc, ripple);

} // namespace test
} // namespace ripple
@@ -174,137 +174,10 @@ admin = 127.0.0.1
         }
     }
-
-    void
-    testServerDefinitions()
-    {
-        testcase("server_definitions");
-
-        using namespace test::jtx;
-
-        {
-            Env env(*this);
-            auto const result = env.rpc("server_definitions");
-            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
-            BEAST_EXPECT(result[jss::result][jss::status] == "success");
-            BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
-            BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
-            BEAST_EXPECT(
-                result[jss::result].isMember(jss::TRANSACTION_RESULTS));
-            BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
-            BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
-            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
-
-            // test a random element of each result
-            // (testing the whole output would be difficult to maintain)
-
-            {
-                auto const firstField = result[jss::result][jss::FIELDS][0u];
-                BEAST_EXPECT(firstField[0u].asString() == "Generic");
-                BEAST_EXPECT(
-                    firstField[1][jss::isSerialized].asBool() == false);
-                BEAST_EXPECT(
-                    firstField[1][jss::isSigningField].asBool() == false);
-                BEAST_EXPECT(firstField[1][jss::isVLEncoded].asBool() == false);
-                BEAST_EXPECT(firstField[1][jss::nth].asUInt() == 0);
-                BEAST_EXPECT(firstField[1][jss::type].asString() == "Unknown");
-            }
-
-            BEAST_EXPECT(
-                result[jss::result][jss::LEDGER_ENTRY_TYPES]["AccountRoot"]
-                    .asUInt() == 97);
-            BEAST_EXPECT(
-                result[jss::result][jss::TRANSACTION_RESULTS]["tecDIR_FULL"]
-                    .asUInt() == 121);
-            BEAST_EXPECT(
-                result[jss::result][jss::TRANSACTION_TYPES]["Payment"]
-                    .asUInt() == 0);
-            BEAST_EXPECT(
-                result[jss::result][jss::TYPES]["AccountID"].asUInt() == 8);
-
-            // check exception SFields
-            {
-                auto const fieldExists = [&](std::string name) {
-                    for (auto& field : result[jss::result][jss::FIELDS])
-                    {
-                        if (field[0u].asString() == name)
-                        {
-                            return true;
-                        }
-                    }
-                    return false;
-                };
-                BEAST_EXPECT(fieldExists("Generic"));
-                BEAST_EXPECT(fieldExists("Invalid"));
-                BEAST_EXPECT(fieldExists("ObjectEndMarker"));
-                BEAST_EXPECT(fieldExists("ArrayEndMarker"));
-                BEAST_EXPECT(fieldExists("taker_gets_funded"));
-                BEAST_EXPECT(fieldExists("taker_pays_funded"));
-                BEAST_EXPECT(fieldExists("hash"));
-                BEAST_EXPECT(fieldExists("index"));
-            }
-
-            // test that base_uint types are replaced with "Hash" prefix
-            {
-                auto const types = result[jss::result][jss::TYPES];
-                BEAST_EXPECT(types["Hash128"].asUInt() == 4);
-                BEAST_EXPECT(types["Hash160"].asUInt() == 17);
-                BEAST_EXPECT(types["Hash192"].asUInt() == 21);
-                BEAST_EXPECT(types["Hash256"].asUInt() == 5);
-                BEAST_EXPECT(types["Hash384"].asUInt() == 22);
-                BEAST_EXPECT(types["Hash512"].asUInt() == 23);
-            }
-        }
-
-        // test providing the same hash
-        {
-            Env env(*this);
-            auto const firstResult = env.rpc("server_definitions");
-            auto const hash = firstResult[jss::result][jss::hash].asString();
-            auto const hashParam =
-                std::string("{ ") + "\"hash\": \"" + hash + "\"}";
-
-            auto const result =
-                env.rpc("json", "server_definitions", hashParam);
-            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
-            BEAST_EXPECT(result[jss::result][jss::status] == "success");
-            BEAST_EXPECT(!result[jss::result].isMember(jss::FIELDS));
-            BEAST_EXPECT(
-                !result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
-            BEAST_EXPECT(
-                !result[jss::result].isMember(jss::TRANSACTION_RESULTS));
-            BEAST_EXPECT(!result[jss::result].isMember(jss::TRANSACTION_TYPES));
-            BEAST_EXPECT(!result[jss::result].isMember(jss::TYPES));
-            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
-        }
-
-        // test providing a different hash
-        {
-            Env env(*this);
-            std::string const hash =
-                "54296160385A27154BFA70A239DD8E8FD4CC2DB7BA32D970BA3A5B132CF749"
-                "D1";
-            auto const hashParam =
-                std::string("{ ") + "\"hash\": \"" + hash + "\"}";
-
-            auto const result =
-                env.rpc("json", "server_definitions", hashParam);
-            BEAST_EXPECT(!result[jss::result].isMember(jss::error));
-            BEAST_EXPECT(result[jss::result][jss::status] == "success");
-            BEAST_EXPECT(result[jss::result].isMember(jss::FIELDS));
-            BEAST_EXPECT(result[jss::result].isMember(jss::LEDGER_ENTRY_TYPES));
-            BEAST_EXPECT(
-                result[jss::result].isMember(jss::TRANSACTION_RESULTS));
-            BEAST_EXPECT(result[jss::result].isMember(jss::TRANSACTION_TYPES));
-            BEAST_EXPECT(result[jss::result].isMember(jss::TYPES));
-            BEAST_EXPECT(result[jss::result].isMember(jss::hash));
-        }
-    }
-
     void
     run() override
     {
         testServerInfo();
-        testServerDefinitions();
     }
 };
 
@@ -1205,7 +1205,7 @@ EscrowFinish::doApply()
         {
             // LCOV_EXCL_START
             JLOG(j_.fatal()) << "Unable to delete Escrow from recipient.";
-            return tefBAD_LEDGER;  // LCOV_EXCL_LINE
+            return tefBAD_LEDGER;
             // LCOV_EXCL_STOP
         }
     }
@@ -202,8 +202,7 @@ VaultDeposit::doApply()
     else  // !vault->isFlag(lsfVaultPrivate) || account_ == vault->at(sfOwner)
     {
         // No authorization needed, but must ensure there is MPToken
-        auto sleMpt = view().read(keylet::mptoken(mptIssuanceID, account_));
-        if (!sleMpt)
+        if (!view().exists(keylet::mptoken(mptIssuanceID, account_)))
         {
             if (auto const err = authorizeMPToken(
                     view(),
@@ -52,12 +52,6 @@ VaultWithdraw::preflight(PreflightContext const& ctx)
             return temMALFORMED;
         }
     }
-    else if (ctx.tx.isFieldPresent(sfDestinationTag))
-    {
-        JLOG(ctx.j.debug()) << "VaultWithdraw: sfDestinationTag is set but "
-                               "sfDestination is not";
-        return temMALFORMED;
-    }
 
     return tesSUCCESS;
 }
@@ -116,37 +110,28 @@ VaultWithdraw::preclaim(PreclaimContext const& ctx)
     }
 
     auto const account = ctx.tx[sfAccount];
-    auto const dstAcct = [&]() -> AccountID {
-        if (ctx.tx.isFieldPresent(sfDestination))
-            return ctx.tx.getAccountID(sfDestination);
-        return account;
-    }();
-
-    // Withdrawal to a 3rd party destination account is essentially a transfer,
-    // via shares in the vault. Enforce all the usual asset transfer checks.
-    AuthType authType = AuthType::Legacy;
-    if (account != dstAcct)
-    {
+    auto const dstAcct = ctx.tx[~sfDestination].value_or(account);
 
     auto const sleDst = ctx.view.read(keylet::account(dstAcct));
     if (sleDst == nullptr)
-        return tecNO_DST;
+        return account == dstAcct ? tecINTERNAL : tecNO_DST;
 
     if (sleDst->isFlag(lsfRequireDestTag) &&
         !ctx.tx.isFieldPresent(sfDestinationTag))
         return tecDST_TAG_NEEDED;  // Cannot send without a tag
 
-    if (sleDst->isFlag(lsfDepositAuth))
+    // Withdrawal to a 3rd party destination account is essentially a transfer,
+    // via shares in the vault. Enforce all the usual asset transfer checks.
+    if (account != dstAcct && sleDst->isFlag(lsfDepositAuth))
     {
         if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account)))
             return tecNO_PERMISSION;
     }
-    // The destination account must have consented to receive the asset by
-    // creating a RippleState or MPToken
-    authType = AuthType::StrongAuth;
-    }
 
-    // Destination MPToken (for an MPT) or trust line (for an IOU) must exist
-    // if not sending to Account.
+    // If sending to Account (i.e. not a transfer), we will also create (only
+    // if authorized) a trust line or MPToken as needed, in doApply().
+    // Destination MPToken or trust line must exist if _not_ sending to Account.
+    AuthType const authType =
+        account == dstAcct ? AuthType::WeakAuth : AuthType::StrongAuth;
 
     if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct, authType);
         !isTesSuccess(ter))
         return ter;
@@ -307,11 +292,16 @@ VaultWithdraw::doApply()
         // else quietly ignore, account balance is not zero
     }
 
-    auto const dstAcct = [&]() -> AccountID {
-        if (ctx_.tx.isFieldPresent(sfDestination))
-            return ctx_.tx.getAccountID(sfDestination);
-        return account_;
-    }();
+    auto const dstAcct = ctx_.tx[~sfDestination].value_or(account_);
+    if (!vaultAsset.native() &&  //
+        dstAcct != vaultAsset.getIssuer() &&  //
+        dstAcct == account_)
+    {
+        if (auto const ter = addEmptyHolding(
+                view(), account_, mPriorBalance, vaultAsset, j_);
+            !isTesSuccess(ter) && ter != tecDUPLICATE)
+            return ter;
+    }
 
     // Transfer assets from vault to depositor or destination account.
     if (auto const ter = accountSend(
@@ -53,6 +53,14 @@ public:
     virtual std::string
     getName() = 0;
 
+    /** Get the block size for backends that support it
+     */
+    virtual std::optional<std::size_t>
+    getBlockSize() const
+    {
+        return std::nullopt;
+    }
+
     /** Open the backend.
         @param createIfMissing Create the database files if necessary.
         This allows the caller to catch exceptions.
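For orientation, here is a minimal sketch of how a nodestore consumer could use the new accessor; the BackendLike stand-in type, the logBlockSize helper, and the plain stdout logging are illustrative assumptions, not code from this change:

#include <cstddef>
#include <iostream>
#include <optional>

// Illustrative stand-in for the Backend interface extended above: backends
// that do not override getBlockSize() report std::nullopt, so callers must
// handle the empty case.
struct BackendLike
{
    virtual ~BackendLike() = default;

    virtual std::optional<std::size_t>
    getBlockSize() const
    {
        return std::nullopt;
    }
};

void
logBlockSize(BackendLike const& backend)
{
    if (auto const bs = backend.getBlockSize())
        std::cout << "backend block size: " << *bs << " bytes\n";
    else
        std::cout << "backend does not report a block size\n";
}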
@@ -24,6 +24,7 @@
 #include <xrpld/nodestore/detail/codec.h>
 
 #include <xrpl/basics/contract.h>
+#include <xrpl/beast/core/LexicalCast.h>
 #include <xrpl/beast/utility/instrumentation.h>
 
 #include <boost/filesystem.hpp>
@@ -52,6 +53,7 @@ public:
     size_t const keyBytes_;
     std::size_t const burstSize_;
     std::string const name_;
+    std::size_t const blockSize_;
     nudb::store db_;
     std::atomic<bool> deletePath_;
     Scheduler& scheduler_;
@@ -66,6 +68,7 @@ public:
         , keyBytes_(keyBytes)
         , burstSize_(burstSize)
         , name_(get(keyValues, "path"))
+        , blockSize_(parseBlockSize(name_, keyValues, journal))
         , deletePath_(false)
         , scheduler_(scheduler)
     {
@@ -85,6 +88,7 @@ public:
         , keyBytes_(keyBytes)
         , burstSize_(burstSize)
         , name_(get(keyValues, "path"))
+        , blockSize_(parseBlockSize(name_, keyValues, journal))
         , db_(context)
         , deletePath_(false)
         , scheduler_(scheduler)
@@ -114,6 +118,12 @@ public:
         return name_;
     }
 
+    std::optional<std::size_t>
+    getBlockSize() const override
+    {
+        return blockSize_;
+    }
+
     void
     open(bool createIfMissing, uint64_t appType, uint64_t uid, uint64_t salt)
         override
@@ -145,7 +155,7 @@ public:
             uid,
             salt,
             keyBytes_,
-            nudb::block_size(kp),
+            blockSize_,
             0.50,
             ec);
         if (ec == nudb::errc::file_exists)
@@ -361,6 +371,56 @@ public:
     {
         return 3;
     }
 
+private:
+    static std::size_t
+    parseBlockSize(
+        std::string const& name,
+        Section const& keyValues,
+        beast::Journal journal)
+    {
+        using namespace boost::filesystem;
+        auto const folder = path(name);
+        auto const kp = (folder / "nudb.key").string();
+
+        std::size_t const defaultSize =
+            nudb::block_size(kp);  // Default 4K from NuDB
+        std::size_t blockSize = defaultSize;
+        std::string blockSizeStr;
+
+        if (!get_if_exists(keyValues, "nudb_block_size", blockSizeStr))
+        {
+            return blockSize;  // Early return with default
+        }
+
+        try
+        {
+            std::size_t const parsedBlockSize =
+                beast::lexicalCastThrow<std::size_t>(blockSizeStr);
+
+            // Validate: must be power of 2 between 4K and 32K
+            if (parsedBlockSize < 4096 || parsedBlockSize > 32768 ||
+                (parsedBlockSize & (parsedBlockSize - 1)) != 0)
+            {
+                std::stringstream s;
+                s << "Invalid nudb_block_size: " << parsedBlockSize
+                  << ". Must be power of 2 between 4096 and 32768.";
+                Throw<std::runtime_error>(s.str());
+            }
+
+            JLOG(journal.info())
+                << "Using custom NuDB block size: " << parsedBlockSize
+                << " bytes";
+            return parsedBlockSize;
+        }
+        catch (std::exception const& e)
+        {
+            std::stringstream s;
+            s << "Invalid nudb_block_size value: " << blockSizeStr
+              << ". Error: " << e.what();
+            Throw<std::runtime_error>(s.str());
+        }
+    }
 };
 
 //------------------------------------------------------------------------------
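The acceptance rule enforced by parseBlockSize above can be stated compactly: the configured value must lie in [4096, 32768] and have exactly one bit set. A self-contained sketch of just that check; the helper name and the sample values are illustrative, not part of the patch:

#include <cassert>
#include <cstddef>

// Mirrors the validation in parseBlockSize: a power of two between
// 4096 and 32768 inclusive.
constexpr bool
isValidNudbBlockSize(std::size_t n)
{
    return n >= 4096 && n <= 32768 && (n & (n - 1)) == 0;
}

int
main()
{
    assert(isValidNudbBlockSize(4096));    // NuDB default
    assert(isValidNudbBlockSize(16384));   // larger power of two
    assert(!isValidNudbBlockSize(12288));  // in range but not a power of two
    assert(!isValidNudbBlockSize(65536));  // power of two but above the cap
}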
320
src/xrpld/rpc/handlers/ServerDefinitions.cpp
Normal file
@@ -0,0 +1,320 @@
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright (c) 2023 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <xrpld/rpc/Context.h>
#include <xrpld/rpc/Role.h>

#include <xrpl/json/json_value.h>
#include <xrpl/json/json_writer.h>
#include <xrpl/protocol/LedgerFormats.h>
#include <xrpl/protocol/SField.h>
#include <xrpl/protocol/TER.h>
#include <xrpl/protocol/TxFormats.h>
#include <xrpl/protocol/digest.h>
#include <xrpl/protocol/jss.h>

#include <boost/algorithm/string.hpp>

#include <unordered_map>

namespace ripple {

namespace detail {

class ServerDefinitions
{
private:
    // translate e.g. STI_LEDGERENTRY to LedgerEntry
    std::string
    translate(std::string const& inp);

    uint256 defsHash_;
    Json::Value defs_;

public:
    ServerDefinitions();

    bool
    hashMatches(uint256 hash) const
    {
        return defsHash_ == hash;
    }

    Json::Value const&
    get() const
    {
        return defs_;
    }
};

std::string
ServerDefinitions::translate(std::string const& inp)
{
    auto replace = [&](char const* oldStr, char const* newStr) -> std::string {
        std::string out = inp;
        boost::replace_all(out, oldStr, newStr);
        return out;
    };

    auto contains = [&](char const* s) -> bool {
        return inp.find(s) != std::string::npos;
    };

    if (contains("UINT"))
    {
        if (contains("512") || contains("384") || contains("256") ||
            contains("192") || contains("160") || contains("128"))
            return replace("UINT", "Hash");
        else
            return replace("UINT", "UInt");
    }

    std::unordered_map<std::string, std::string> replacements{
        {"OBJECT", "STObject"},
        {"ARRAY", "STArray"},
        {"ACCOUNT", "AccountID"},
        {"LEDGERENTRY", "LedgerEntry"},
        {"NOTPRESENT", "NotPresent"},
        {"PATHSET", "PathSet"},
        {"VL", "Blob"},
        {"XCHAIN_BRIDGE", "XChainBridge"},
    };

    if (auto const& it = replacements.find(inp); it != replacements.end())
    {
        return it->second;
    }

    std::string out;
    size_t pos = 0;
    std::string inpToProcess = inp;

    // convert snake_case to CamelCase
    for (;;)
    {
        pos = inpToProcess.find("_");
        if (pos == std::string::npos)
            pos = inpToProcess.size();
        std::string token = inpToProcess.substr(0, pos);
        if (token.size() > 1)
        {
            boost::algorithm::to_lower(token);
            token.data()[0] -= ('a' - 'A');
            out += token;
        }
        else
            out += token;
        if (pos == inpToProcess.size())
            break;
        inpToProcess = inpToProcess.substr(pos + 1);
    }
    return out;
};

ServerDefinitions::ServerDefinitions() : defs_{Json::objectValue}
{
    // populate SerializedTypeID names and values
    defs_[jss::TYPES] = Json::objectValue;

    defs_[jss::TYPES]["Done"] = -1;
    std::map<int32_t, std::string> typeMap{{-1, "Done"}};
    for (auto const& [rawName, typeValue] : sTypeMap)
    {
        std::string typeName =
            translate(std::string(rawName).substr(4) /* remove STI_ */);
        defs_[jss::TYPES][typeName] = typeValue;
        typeMap[typeValue] = typeName;
    }

    // populate LedgerEntryType names and values
    defs_[jss::LEDGER_ENTRY_TYPES] = Json::objectValue;
    defs_[jss::LEDGER_ENTRY_TYPES][jss::Invalid] = -1;

    for (auto const& f : LedgerFormats::getInstance())
    {
        defs_[jss::LEDGER_ENTRY_TYPES][f.getName()] = f.getType();
    }

    // populate SField serialization data
    defs_[jss::FIELDS] = Json::arrayValue;

    uint32_t i = 0;
    {
        Json::Value a = Json::arrayValue;
        a[0U] = "Generic";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 0;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = false;
        v[jss::isSigningField] = false;
        v[jss::type] = "Unknown";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "Invalid";
        Json::Value v = Json::objectValue;
        v[jss::nth] = -1;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = false;
        v[jss::isSigningField] = false;
        v[jss::type] = "Unknown";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "ObjectEndMarker";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 1;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = true;
        v[jss::isSigningField] = true;
        v[jss::type] = "STObject";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "ArrayEndMarker";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 1;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = true;
        v[jss::isSigningField] = true;
        v[jss::type] = "STArray";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "taker_gets_funded";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 258;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = false;
        v[jss::isSigningField] = false;
        v[jss::type] = "Amount";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    {
        Json::Value a = Json::arrayValue;
        a[0U] = "taker_pays_funded";
        Json::Value v = Json::objectValue;
        v[jss::nth] = 259;
        v[jss::isVLEncoded] = false;
        v[jss::isSerialized] = false;
        v[jss::isSigningField] = false;
        v[jss::type] = "Amount";
        a[1U] = v;
        defs_[jss::FIELDS][i++] = a;
    }

    for (auto const& [code, f] : ripple::SField::getKnownCodeToField())
    {
        if (f->fieldName == "")
            continue;

        Json::Value innerObj = Json::objectValue;

        uint32_t type = f->fieldType;

        innerObj[jss::nth] = f->fieldValue;

        // whether the field is variable-length encoded
        // this means that the length is included before the content
        innerObj[jss::isVLEncoded] =
            (type == 7U /* Blob */ || type == 8U /* AccountID */ ||
             type == 19U /* Vector256 */);

        // whether the field is included in serialization
        innerObj[jss::isSerialized] =
            (type < 10000 && f->fieldName != "hash" &&
             f->fieldName != "index"); /* hash, index, TRANSACTION,
                                          LEDGER_ENTRY, VALIDATION, METADATA */

        // whether the field is included in serialization when signing
        innerObj[jss::isSigningField] = f->shouldInclude(false);

        innerObj[jss::type] = typeMap[type];

        Json::Value innerArray = Json::arrayValue;
        innerArray[0U] = f->fieldName;
        innerArray[1U] = innerObj;

        defs_[jss::FIELDS][i++] = innerArray;
    }

    // populate TER code names and values
    defs_[jss::TRANSACTION_RESULTS] = Json::objectValue;

    for (auto const& [code, terInfo] : transResults())
    {
        defs_[jss::TRANSACTION_RESULTS][terInfo.first] = code;
    }

    // populate TxType names and values
    defs_[jss::TRANSACTION_TYPES] = Json::objectValue;
    defs_[jss::TRANSACTION_TYPES][jss::Invalid] = -1;
    for (auto const& f : TxFormats::getInstance())
    {
        defs_[jss::TRANSACTION_TYPES][f.getName()] = f.getType();
    }

    // generate hash
    {
        std::string const out = Json::FastWriter().write(defs_);
        defsHash_ = ripple::sha512Half(ripple::Slice{out.data(), out.size()});
        defs_[jss::hash] = to_string(defsHash_);
    }
}

}  // namespace detail

Json::Value
doServerDefinitions(RPC::JsonContext& context)
{
    auto& params = context.params;

    uint256 hash;
    if (params.isMember(jss::hash))
    {
        if (!params[jss::hash].isString() ||
            !hash.parseHex(params[jss::hash].asString()))
            return RPC::invalid_field_error(jss::hash);
    }

    static detail::ServerDefinitions const defs{};
    if (defs.hashMatches(hash))
    {
        Json::Value jv = Json::objectValue;
        jv[jss::hash] = to_string(hash);
        return jv;
    }
    return defs.get();
}

}  // namespace ripple
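As a side note, the snake_case fallback at the end of ServerDefinitions::translate() lowercases each underscore-separated token and re-capitalizes its first letter; single-character tokens pass through unchanged. A self-contained sketch of that fallback, where the function name and sample inputs are illustrative rather than taken from the handler:

#include <cassert>
#include <cctype>
#include <string>

// Restates the snake_case -> CamelCase loop used as the last resort in
// translate(), without the UINT and fixed-map special cases.
std::string
toCamelCase(std::string input)
{
    std::string out;
    for (;;)
    {
        auto pos = input.find('_');
        if (pos == std::string::npos)
            pos = input.size();
        std::string token = input.substr(0, pos);
        if (token.size() > 1)
        {
            for (auto& c : token)
                c = static_cast<char>(
                    std::tolower(static_cast<unsigned char>(c)));
            token[0] = static_cast<char>(
                std::toupper(static_cast<unsigned char>(token[0])));
        }
        out += token;
        if (pos == input.size())
            break;
        input = input.substr(pos + 1);
    }
    return out;
}

int
main()
{
    assert(toCamelCase("TRANSACTION") == "Transaction");
    assert(toCamelCase("LEDGER_ENTRY") == "LedgerEntry");
}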
@@ -23,301 +23,10 @@
 
 #include <xrpl/json/json_value.h>
 #include <xrpl/json/json_writer.h>
-#include <xrpl/protocol/LedgerFormats.h>
-#include <xrpl/protocol/SField.h>
-#include <xrpl/protocol/TER.h>
-#include <xrpl/protocol/TxFormats.h>
-#include <xrpl/protocol/digest.h>
 #include <xrpl/protocol/jss.h>
 
-#include <boost/algorithm/string.hpp>
-
-#include <unordered_map>
-
 namespace ripple {
 
 Json::Value
 doServerInfo(RPC::JsonContext& context)
 {