From 80d82c5b2b098fb11fb2f98dbcc3b6d5b8d9caf5 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Wed, 23 Jul 2025 18:21:30 +0100 Subject: [PATCH 1/7] Add support for `DomainID` in `MPTokenIssuance` transactions (#5509) This change adds support for `DomainID` to existing transactions `MPTokenIssuanceCreate` and `MPTokenIssuanceSet`. In #5224 `DomainID` was added as an access control mechanism for `SingleAssetVault`. The actual implementation of this feature lies in `MPToken` and `MPTokenIssuance`, hence it makes sense to enable the use of `DomainID` also in `MPTokenIssuanceCreate` and `MPTokenIssuanceSet`, following same rules as in Vault: * `MPTokenIssuanceCreate` and `MPTokenIssuanceSet` can only set `DomainID` if flag `MPTRequireAuth` is set. * `MPTokenIssuanceCreate` requires that `DomainID` be a non-zero, uint256 number. * `MPTokenIssuanceSet` allows `DomainID` to be zero (or empty) in which case it will remove `DomainID` from the `MPTokenIssuance` object. The change is amendment-gated by `SingleAssetVault`. This is a non-breaking change because `SingleAssetVault` amendment is `Supported::no`, i.e. at this moment considered a work in progress, which cannot be enabled on the network. --- .../xrpl/protocol/detail/ledger_entries.macro | 3 +- .../xrpl/protocol/detail/transactions.macro | 6 +- src/test/app/MPToken_test.cpp | 577 ++++++++++++++++-- src/test/jtx/impl/mpt.cpp | 15 + src/test/jtx/mpt.h | 5 + .../app/tx/detail/MPTokenIssuanceCreate.cpp | 16 + .../app/tx/detail/MPTokenIssuanceSet.cpp | 60 +- 7 files changed, 615 insertions(+), 67 deletions(-) diff --git a/include/xrpl/protocol/detail/ledger_entries.macro b/include/xrpl/protocol/detail/ledger_entries.macro index 46c6e60db3..11306ee0f5 100644 --- a/include/xrpl/protocol/detail/ledger_entries.macro +++ b/include/xrpl/protocol/detail/ledger_entries.macro @@ -482,8 +482,7 @@ LEDGER_ENTRY(ltDELEGATE, 0x0083, Delegate, delegate, ({ })) /** A ledger object representing a single asset vault. 
- - \sa keylet::mptoken + \sa keylet::vault */ LEDGER_ENTRY(ltVAULT, 0x0084, Vault, vault, ({ {sfPreviousTxnID, soeREQUIRED}, diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 1d59e71850..38665296cd 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -409,6 +409,7 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_CREATE, 54, MPTokenIssuanceCreate, Delegation::de {sfTransferFee, soeOPTIONAL}, {sfMaximumAmount, soeOPTIONAL}, {sfMPTokenMetadata, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL}, })) /** This transaction type destroys a MPTokensIssuance instance */ @@ -420,6 +421,7 @@ TRANSACTION(ttMPTOKEN_ISSUANCE_DESTROY, 55, MPTokenIssuanceDestroy, Delegation:: TRANSACTION(ttMPTOKEN_ISSUANCE_SET, 56, MPTokenIssuanceSet, Delegation::delegatable, ({ {sfMPTokenIssuanceID, soeREQUIRED}, {sfHolder, soeOPTIONAL}, + {sfDomainID, soeOPTIONAL}, })) /** This transaction type authorizes a MPToken instance */ @@ -478,7 +480,7 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, ({ {sfAsset, soeREQUIRED, soeMPTSupported}, {sfAssetsMaximum, soeOPTIONAL}, {sfMPTokenMetadata, soeOPTIONAL}, - {sfDomainID, soeOPTIONAL}, // PermissionedDomainID + {sfDomainID, soeOPTIONAL}, {sfWithdrawalPolicy, soeOPTIONAL}, {sfData, soeOPTIONAL}, })) @@ -487,7 +489,7 @@ TRANSACTION(ttVAULT_CREATE, 65, VaultCreate, Delegation::delegatable, ({ TRANSACTION(ttVAULT_SET, 66, VaultSet, Delegation::delegatable, ({ {sfVaultID, soeREQUIRED}, {sfAssetsMaximum, soeOPTIONAL}, - {sfDomainID, soeOPTIONAL}, // PermissionedDomainID + {sfDomainID, soeOPTIONAL}, {sfData, soeOPTIONAL}, })) diff --git a/src/test/app/MPToken_test.cpp b/src/test/app/MPToken_test.cpp index 46b64e40f2..2cb47780ba 100644 --- a/src/test/app/MPToken_test.cpp +++ b/src/test/app/MPToken_test.cpp @@ -18,10 +18,16 @@ //============================================================================== #include +#include +#include #include #include +#include +#include #include +#include +#include #include namespace ripple { @@ -61,6 +67,48 @@ class MPToken_test : public beast::unit_test::suite .metadata = "test", .err = temMALFORMED}); + if (!features[featureSingleAssetVault]) + { + // tries to set DomainID when SAV is disabled + mptAlice.create( + {.maxAmt = 100, + .assetScale = 0, + .metadata = "test", + .flags = tfMPTRequireAuth, + .domainID = uint256(42), + .err = temDISABLED}); + } + else if (!features[featurePermissionedDomains]) + { + // tries to set DomainID when PD is disabled + mptAlice.create( + {.maxAmt = 100, + .assetScale = 0, + .metadata = "test", + .flags = tfMPTRequireAuth, + .domainID = uint256(42), + .err = temDISABLED}); + } + else + { + // tries to set DomainID when RequireAuth is not set + mptAlice.create( + {.maxAmt = 100, + .assetScale = 0, + .metadata = "test", + .domainID = uint256(42), + .err = temMALFORMED}); + + // tries to set zero DomainID + mptAlice.create( + {.maxAmt = 100, + .assetScale = 0, + .metadata = "test", + .flags = tfMPTRequireAuth, + .domainID = beast::zero, + .err = temMALFORMED}); + } + // tries to set a txfee greater than max mptAlice.create( {.maxAmt = 100, @@ -140,6 +188,48 @@ class MPToken_test : public beast::unit_test::suite BEAST_EXPECT( result[sfMaximumAmount.getJsonName()] == "9223372036854775807"); } + + if (features[featureSingleAssetVault]) + { + // Add permissioned domain + Account const credIssuer1{"credIssuer1"}; + std::string const credType = "credential"; + + pdomain::Credentials const 
credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + { + Env env{*this, features}; + env.fund(XRP(1000), credIssuer1); + + env(pdomain::setTx(credIssuer1, credentials1)); + auto const domainId1 = [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + + MPTTester mptAlice(env, alice); + mptAlice.create({ + .maxAmt = maxMPTokenAmount, // 9'223'372'036'854'775'807 + .assetScale = 1, + .transferFee = 10, + .metadata = "123", + .ownerCount = 1, + .flags = tfMPTCanLock | tfMPTRequireAuth | tfMPTCanEscrow | + tfMPTCanTrade | tfMPTCanTransfer | tfMPTCanClawback, + .domainID = domainId1, + }); + + // Get the hash for the most recent transaction. + std::string const txHash{ + env.tx()->getJson(JsonOptions::none)[jss::hash].asString()}; + + Json::Value const result = env.rpc("tx", txHash)[jss::result]; + BEAST_EXPECT( + result[sfMaximumAmount.getJsonName()] == + "9223372036854775807"); + } + } } void @@ -499,6 +589,59 @@ class MPToken_test : public beast::unit_test::suite .flags = 0x00000008, .err = temINVALID_FLAG}); + if (!features[featureSingleAssetVault]) + { + // test invalid flags - nothing is being changed + mptAlice.set( + {.account = alice, + .flags = 0x00000000, + .err = tecNO_PERMISSION}); + + mptAlice.set( + {.account = alice, + .holder = bob, + .flags = 0x00000000, + .err = tecNO_PERMISSION}); + + // cannot set DomainID since SAV is not enabled + mptAlice.set( + {.account = alice, + .domainID = uint256(42), + .err = temDISABLED}); + } + else + { + // test invalid flags - nothing is being changed + mptAlice.set( + {.account = alice, + .flags = 0x00000000, + .err = temMALFORMED}); + + mptAlice.set( + {.account = alice, + .holder = bob, + .flags = 0x00000000, + .err = temMALFORMED}); + + if (!features[featurePermissionedDomains]) + { + // cannot set DomainID since PD is not enabled + mptAlice.set( + {.account = alice, + .domainID = uint256(42), + .err = temDISABLED}); + } + else + { + // cannot set DomainID since Holder is set + mptAlice.set( + {.account = alice, + .holder = bob, + .domainID = uint256(42), + .err = temMALFORMED}); + } + } + // set both lock and unlock flags at the same time will fail mptAlice.set( {.account = alice, @@ -582,6 +725,53 @@ class MPToken_test : public beast::unit_test::suite mptAlice.set( {.holder = cindy, .flags = tfMPTLock, .err = tecNO_DST}); } + + if (features[featureSingleAssetVault] && + features[featurePermissionedDomains]) + { + // Add permissioned domain + Account const credIssuer1{"credIssuer1"}; + std::string const credType = "credential"; + + pdomain::Credentials const credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + { + Env env{*this, features}; + + MPTTester mptAlice(env, alice); + mptAlice.create({}); + + // Trying to set DomainID on a public MPTokenIssuance + mptAlice.set( + {.domainID = uint256(42), .err = tecNO_PERMISSION}); + + mptAlice.set( + {.domainID = beast::zero, .err = tecNO_PERMISSION}); + } + + { + Env env{*this, features}; + + MPTTester mptAlice(env, alice); + mptAlice.create({.flags = tfMPTRequireAuth}); + + // Trying to set non-existing DomainID + mptAlice.set( + {.domainID = uint256(42), .err = tecOBJECT_NOT_FOUND}); + + // Trying to lock but locking is disabled + mptAlice.set( + {.flags = tfMPTUnlock, + .domainID = uint256(42), + .err = tecNO_PERMISSION}); + + mptAlice.set( + {.flags = tfMPTUnlock, + .domainID = beast::zero, + .err = tecNO_PERMISSION}); + } + } } void @@ -590,71 +780,136 @@ class MPToken_test : public 
beast::unit_test::suite testcase("Enabled set transaction"); using namespace test::jtx; - - // Test locking and unlocking - Env env{*this, features}; Account const alice("alice"); // issuer Account const bob("bob"); // holder - MPTTester mptAlice(env, alice, {.holders = {bob}}); - - // create a mptokenissuance with locking - mptAlice.create( - {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanLock}); - - mptAlice.authorize({.account = bob, .holderCount = 1}); - - // locks bob's mptoken - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); - - // trying to lock bob's mptoken again will still succeed - // but no changes to the objects - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); - - // alice locks the mptissuance - mptAlice.set({.account = alice, .flags = tfMPTLock}); - - // alice tries to lock up both mptissuance and mptoken again - // it will not change the flags and both will remain locked. - mptAlice.set({.account = alice, .flags = tfMPTLock}); - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); - - // alice unlocks bob's mptoken - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTUnlock}); - - // locks up bob's mptoken again - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); - if (!features[featureSingleAssetVault]) { - // Delete bobs' mptoken even though it is locked - mptAlice.authorize({.account = bob, .flags = tfMPTUnauthorize}); + // Test locking and unlocking + Env env{*this, features}; + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + // create a mptokenissuance with locking + mptAlice.create( + {.ownerCount = 1, .holderCount = 0, .flags = tfMPTCanLock}); + + mptAlice.authorize({.account = bob, .holderCount = 1}); + + // locks bob's mptoken + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + + // trying to lock bob's mptoken again will still succeed + // but no changes to the objects + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + + // alice locks the mptissuance + mptAlice.set({.account = alice, .flags = tfMPTLock}); + + // alice tries to lock up both mptissuance and mptoken again + // it will not change the flags and both will remain locked. + mptAlice.set({.account = alice, .flags = tfMPTLock}); + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + + // alice unlocks bob's mptoken mptAlice.set( - {.account = alice, - .holder = bob, - .flags = tfMPTUnlock, - .err = tecOBJECT_NOT_FOUND}); + {.account = alice, .holder = bob, .flags = tfMPTUnlock}); - return; + // locks up bob's mptoken again + mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTLock}); + if (!features[featureSingleAssetVault]) + { + // Delete bobs' mptoken even though it is locked + mptAlice.authorize({.account = bob, .flags = tfMPTUnauthorize}); + + mptAlice.set( + {.account = alice, + .holder = bob, + .flags = tfMPTUnlock, + .err = tecOBJECT_NOT_FOUND}); + + return; + } + + // Cannot delete locked MPToken + mptAlice.authorize( + {.account = bob, + .flags = tfMPTUnauthorize, + .err = tecNO_PERMISSION}); + + // alice unlocks mptissuance + mptAlice.set({.account = alice, .flags = tfMPTUnlock}); + + // alice unlocks bob's mptoken + mptAlice.set( + {.account = alice, .holder = bob, .flags = tfMPTUnlock}); + + // alice unlocks mptissuance and bob's mptoken again despite that + // they are already unlocked. 
Make sure this will not change the + // flags + mptAlice.set( + {.account = alice, .holder = bob, .flags = tfMPTUnlock}); + mptAlice.set({.account = alice, .flags = tfMPTUnlock}); } - // Cannot delete locked MPToken - mptAlice.authorize( - {.account = bob, - .flags = tfMPTUnauthorize, - .err = tecNO_PERMISSION}); + if (features[featureSingleAssetVault]) + { + // Add permissioned domain + std::string const credType = "credential"; - // alice unlocks mptissuance - mptAlice.set({.account = alice, .flags = tfMPTUnlock}); + // Test setting and resetting domain ID + Env env{*this, features}; - // alice unlocks bob's mptoken - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTUnlock}); + auto const domainId1 = [&]() { + Account const credIssuer1{"credIssuer1"}; + env.fund(XRP(1000), credIssuer1); - // alice unlocks mptissuance and bob's mptoken again despite that - // they are already unlocked. Make sure this will not change the - // flags - mptAlice.set({.account = alice, .holder = bob, .flags = tfMPTUnlock}); - mptAlice.set({.account = alice, .flags = tfMPTUnlock}); + pdomain::Credentials const credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + env(pdomain::setTx(credIssuer1, credentials1)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + + auto const domainId2 = [&]() { + Account const credIssuer2{"credIssuer2"}; + env.fund(XRP(1000), credIssuer2); + + pdomain::Credentials const credentials2{ + {.issuer = credIssuer2, .credType = credType}}; + + env(pdomain::setTx(credIssuer2, credentials2)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + + MPTTester mptAlice(env, alice, {.holders = {bob}}); + + // create a mptokenissuance with auth. + mptAlice.create( + {.ownerCount = 1, .holderCount = 0, .flags = tfMPTRequireAuth}); + BEAST_EXPECT(mptAlice.checkDomainID(std::nullopt)); + + // reset "domain not set" to "domain not set", i.e. 
no change + mptAlice.set({.domainID = beast::zero}); + BEAST_EXPECT(mptAlice.checkDomainID(std::nullopt)); + + // reset "domain not set" to domain1 + mptAlice.set({.domainID = domainId1}); + BEAST_EXPECT(mptAlice.checkDomainID(domainId1)); + + // reset domain1 to domain2 + mptAlice.set({.domainID = domainId2}); + BEAST_EXPECT(mptAlice.checkDomainID(domainId2)); + + // reset domain to "domain not set" + mptAlice.set({.domainID = beast::zero}); + BEAST_EXPECT(mptAlice.checkDomainID(std::nullopt)); + } } void @@ -889,6 +1144,200 @@ class MPToken_test : public beast::unit_test::suite mptAlice.pay(bob, alice, 100, tecNO_AUTH); } + if (features[featureSingleAssetVault] && + features[featurePermissionedDomains]) + { + // If RequireAuth is enabled and domain is a match, payment succeeds + { + Env env{*this, features}; + std::string const credType = "credential"; + Account const credIssuer1{"credIssuer1"}; + env.fund(XRP(1000), credIssuer1, bob); + + auto const domainId1 = [&]() { + pdomain::Credentials const credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + env(pdomain::setTx(credIssuer1, credentials1)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + // bob is authorized via domain + env(credentials::create(bob, credIssuer1, credType)); + env(credentials::accept(bob, credIssuer1, credType)); + env.close(); + + MPTTester mptAlice(env, alice, {}); + env.close(); + + mptAlice.create({ + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTRequireAuth | tfMPTCanTransfer, + .domainID = domainId1, + }); + + mptAlice.authorize({.account = bob}); + env.close(); + + // bob is authorized via domain + mptAlice.pay(alice, bob, 100); + mptAlice.set({.domainID = beast::zero}); + + // bob is no longer authorized + mptAlice.pay(alice, bob, 100, tecNO_AUTH); + } + + { + Env env{*this, features}; + std::string const credType = "credential"; + Account const credIssuer1{"credIssuer1"}; + env.fund(XRP(1000), credIssuer1, bob); + + auto const domainId1 = [&]() { + pdomain::Credentials const credentials1{ + {.issuer = credIssuer1, .credType = credType}}; + + env(pdomain::setTx(credIssuer1, credentials1)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + // bob is authorized via domain + env(credentials::create(bob, credIssuer1, credType)); + env(credentials::accept(bob, credIssuer1, credType)); + env.close(); + + MPTTester mptAlice(env, alice, {}); + env.close(); + + mptAlice.create({ + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTRequireAuth | tfMPTCanTransfer, + .domainID = domainId1, + }); + + // bob creates an empty MPToken + mptAlice.authorize({.account = bob}); + + // alice authorizes bob to hold funds + mptAlice.authorize({.account = alice, .holder = bob}); + + // alice sends 100 MPT to bob + mptAlice.pay(alice, bob, 100); + + // alice UNAUTHORIZES bob + mptAlice.authorize( + {.account = alice, + .holder = bob, + .flags = tfMPTUnauthorize}); + + // bob is still authorized, via domain + mptAlice.pay(bob, alice, 10); + + mptAlice.set({.domainID = beast::zero}); + + // bob fails to send back to alice because he is no longer + // authorize to move his funds! 
+ mptAlice.pay(bob, alice, 10, tecNO_AUTH); + } + + { + Env env{*this, features}; + std::string const credType = "credential"; + // credIssuer1 is the owner of domainId1 and a credential issuer + Account const credIssuer1{"credIssuer1"}; + // credIssuer2 is the owner of domainId2 and a credential issuer + // Note, domainId2 also lists credentials issued by credIssuer1 + Account const credIssuer2{"credIssuer2"}; + env.fund(XRP(1000), credIssuer1, credIssuer2, bob, carol); + + auto const domainId1 = [&]() { + pdomain::Credentials const credentials{ + {.issuer = credIssuer1, .credType = credType}}; + + env(pdomain::setTx(credIssuer1, credentials)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + + auto const domainId2 = [&]() { + pdomain::Credentials const credentials{ + {.issuer = credIssuer1, .credType = credType}, + {.issuer = credIssuer2, .credType = credType}}; + + env(pdomain::setTx(credIssuer2, credentials)); + return [&]() { + auto tx = env.tx()->getJson(JsonOptions::none); + return pdomain::getNewDomain(env.meta()); + }(); + }(); + + // bob is authorized via credIssuer1 which is recognized by both + // domainId1 and domainId2 + env(credentials::create(bob, credIssuer1, credType)); + env(credentials::accept(bob, credIssuer1, credType)); + env.close(); + + // carol is authorized via credIssuer2, only recognized by + // domainId2 + env(credentials::create(carol, credIssuer2, credType)); + env(credentials::accept(carol, credIssuer2, credType)); + env.close(); + + MPTTester mptAlice(env, alice, {}); + env.close(); + + mptAlice.create({ + .ownerCount = 1, + .holderCount = 0, + .flags = tfMPTRequireAuth | tfMPTCanTransfer, + .domainID = domainId1, + }); + + // bob and carol create an empty MPToken + mptAlice.authorize({.account = bob}); + mptAlice.authorize({.account = carol}); + env.close(); + + // alice sends 50 MPT to bob but cannot send to carol + mptAlice.pay(alice, bob, 50); + mptAlice.pay(alice, carol, 50, tecNO_AUTH); + env.close(); + + // bob cannot send to carol because they are not on the same + // domain (since credIssuer2 is not recognized by domainId1) + mptAlice.pay(bob, carol, 10, tecNO_AUTH); + env.close(); + + // alice updates domainID to domainId2 which recognizes both + // credIssuer1 and credIssuer2 + mptAlice.set({.domainID = domainId2}); + // alice can now send to carol + mptAlice.pay(alice, carol, 10); + env.close(); + + // bob can now send to carol because both are in the same + // domain + mptAlice.pay(bob, carol, 10); + env.close(); + + // bob loses his authorization and can no longer send MPT + env(credentials::deleteCred( + credIssuer1, bob, credIssuer1, credType)); + env.close(); + + mptAlice.pay(bob, carol, 10, tecNO_AUTH); + mptAlice.pay(bob, alice, 10, tecNO_AUTH); + } + } + // Non-issuer cannot send to each other if MPTCanTransfer isn't set { Env env(*this, features); @@ -1340,10 +1789,8 @@ class MPToken_test : public beast::unit_test::suite } void - testDepositPreauth() + testDepositPreauth(FeatureBitset features) { - testcase("DepositPreauth"); - using namespace test::jtx; Account const alice("alice"); // issuer Account const bob("bob"); // holder @@ -1352,8 +1799,11 @@ class MPToken_test : public beast::unit_test::suite char const credType[] = "abcde"; + if (features[featureCredentials]) { - Env env(*this); + testcase("DepositPreauth"); + + Env env(*this, features); env.fund(XRP(50000), diana, dpIssuer); env.close(); @@ -2297,6 +2747,8 @@ public: // MPTokenIssuanceCreate 
testCreateValidation(all - featureSingleAssetVault); + testCreateValidation( + (all | featureSingleAssetVault) - featurePermissionedDomains); testCreateValidation(all | featureSingleAssetVault); testCreateEnabled(all - featureSingleAssetVault); testCreateEnabled(all | featureSingleAssetVault); @@ -2314,7 +2766,11 @@ public: testAuthorizeEnabled(all | featureSingleAssetVault); // MPTokenIssuanceSet - testSetValidation(all); + testSetValidation(all - featureSingleAssetVault); + testSetValidation( + (all | featureSingleAssetVault) - featurePermissionedDomains); + testSetValidation(all | featureSingleAssetVault); + testSetEnabled(all - featureSingleAssetVault); testSetEnabled(all | featureSingleAssetVault); @@ -2323,8 +2779,9 @@ public: testClawback(all); // Test Direct Payment - testPayment(all); - testDepositPreauth(); + testPayment(all | featureSingleAssetVault); + testDepositPreauth(all); + testDepositPreauth(all - featureCredentials); // Test MPT Amount is invalid in Tx, which don't support MPT testMPTInvalidInTx(all); diff --git a/src/test/jtx/impl/mpt.cpp b/src/test/jtx/impl/mpt.cpp index d33432d316..9f7a611feb 100644 --- a/src/test/jtx/impl/mpt.cpp +++ b/src/test/jtx/impl/mpt.cpp @@ -19,6 +19,7 @@ #include +#include #include namespace ripple { @@ -99,6 +100,8 @@ MPTTester::create(MPTCreate const& arg) jv[sfMPTokenMetadata] = strHex(*arg.metadata); if (arg.maxAmt) jv[sfMaximumAmount] = std::to_string(*arg.maxAmt); + if (arg.domainID) + jv[sfDomainID] = to_string(*arg.domainID); if (submit(arg, jv) != tesSUCCESS) { // Verify issuance doesn't exist @@ -235,6 +238,8 @@ MPTTester::set(MPTSet const& arg) jv[sfHolder] = arg.holder->human(); if (arg.delegate) jv[sfDelegate] = arg.delegate->human(); + if (arg.domainID) + jv[sfDomainID] = to_string(*arg.domainID); if (submit(arg, jv) == tesSUCCESS && arg.flags.value_or(0)) { auto require = [&](std::optional const& holder, @@ -272,6 +277,16 @@ MPTTester::forObject( return false; } +[[nodiscard]] bool +MPTTester::checkDomainID(std::optional expected) const +{ + return forObject([&](SLEP const& sle) -> bool { + if (sle->isFieldPresent(sfDomainID)) + return expected == sle->getFieldH256(sfDomainID); + return (!expected.has_value()); + }); +} + [[nodiscard]] bool MPTTester::checkMPTokenAmount( Account const& holder_, diff --git a/src/test/jtx/mpt.h b/src/test/jtx/mpt.h index 64eaa452f5..4756ca723d 100644 --- a/src/test/jtx/mpt.h +++ b/src/test/jtx/mpt.h @@ -106,6 +106,7 @@ struct MPTCreate std::optional holderCount = std::nullopt; bool fund = true; std::optional flags = {0}; + std::optional domainID = std::nullopt; std::optional err = std::nullopt; }; @@ -139,6 +140,7 @@ struct MPTSet std::optional holderCount = std::nullopt; std::optional flags = std::nullopt; std::optional delegate = std::nullopt; + std::optional domainID = std::nullopt; std::optional err = std::nullopt; }; @@ -165,6 +167,9 @@ public: void set(MPTSet const& set = {}); + [[nodiscard]] bool + checkDomainID(std::optional expected) const; + [[nodiscard]] bool checkMPTokenAmount(Account const& holder, std::int64_t expectedAmount) const; diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp index 1b96b27f24..da3b57c8fe 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceCreate.cpp @@ -31,6 +31,11 @@ MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) if (!ctx.rules.enabled(featureMPTokensV1)) return temDISABLED; + if (ctx.tx.isFieldPresent(sfDomainID) && + 
!(ctx.rules.enabled(featurePermissionedDomains) && + ctx.rules.enabled(featureSingleAssetVault))) + return temDISABLED; + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; @@ -48,6 +53,16 @@ MPTokenIssuanceCreate::preflight(PreflightContext const& ctx) return temMALFORMED; } + if (auto const domain = ctx.tx[~sfDomainID]) + { + if (*domain == beast::zero) + return temMALFORMED; + + // Domain present implies that MPTokenIssuance is not public + if ((ctx.tx.getFlags() & tfMPTRequireAuth) == 0) + return temMALFORMED; + } + if (auto const metadata = ctx.tx[~sfMPTokenMetadata]) { if (metadata->length() == 0 || @@ -142,6 +157,7 @@ MPTokenIssuanceCreate::doApply() .assetScale = tx[~sfAssetScale], .transferFee = tx[~sfTransferFee], .metadata = tx[~sfMPTokenMetadata], + .domainId = tx[~sfDomainID], }); return result ? tesSUCCESS : result.error(); } diff --git a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp index 06ea089526..e05862af37 100644 --- a/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp +++ b/src/xrpld/app/tx/detail/MPTokenIssuanceSet.cpp @@ -21,6 +21,7 @@ #include #include +#include #include namespace ripple { @@ -31,6 +32,14 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) if (!ctx.rules.enabled(featureMPTokensV1)) return temDISABLED; + if (ctx.tx.isFieldPresent(sfDomainID) && + !(ctx.rules.enabled(featurePermissionedDomains) && + ctx.rules.enabled(featureSingleAssetVault))) + return temDISABLED; + + if (ctx.tx.isFieldPresent(sfDomainID) && ctx.tx.isFieldPresent(sfHolder)) + return temMALFORMED; + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; @@ -48,6 +57,13 @@ MPTokenIssuanceSet::preflight(PreflightContext const& ctx) if (holderID && accountID == holderID) return temMALFORMED; + if (ctx.rules.enabled(featureSingleAssetVault)) + { + // Is this transaction actually changing anything ? + if (txFlags == 0 && !ctx.tx.isFieldPresent(sfDomainID)) + return temMALFORMED; + } + return preflight2(ctx); } @@ -97,9 +113,14 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) if (!sleMptIssuance) return tecOBJECT_NOT_FOUND; - // if the mpt has disabled locking - if (!((*sleMptIssuance)[sfFlags] & lsfMPTCanLock)) - return tecNO_PERMISSION; + if (!sleMptIssuance->isFlag(lsfMPTCanLock)) + { + // For readability two separate `if` rather than `||` of two conditions + if (!ctx.view.rules().enabled(featureSingleAssetVault)) + return tecNO_PERMISSION; + else if (ctx.tx.isFlag(tfMPTLock) || ctx.tx.isFlag(tfMPTUnlock)) + return tecNO_PERMISSION; + } // ensure it is issued by the tx submitter if ((*sleMptIssuance)[sfIssuer] != ctx.tx[sfAccount]) @@ -117,6 +138,20 @@ MPTokenIssuanceSet::preclaim(PreclaimContext const& ctx) return tecOBJECT_NOT_FOUND; } + if (auto const domain = ctx.tx[~sfDomainID]) + { + if (not sleMptIssuance->isFlag(lsfMPTRequireAuth)) + return tecNO_PERMISSION; + + if (*domain != beast::zero) + { + auto const sleDomain = + ctx.view.read(keylet::permissionedDomain(*domain)); + if (!sleDomain) + return tecOBJECT_NOT_FOUND; + } + } + return tesSUCCESS; } @@ -126,6 +161,7 @@ MPTokenIssuanceSet::doApply() auto const mptIssuanceID = ctx_.tx[sfMPTokenIssuanceID]; auto const txFlags = ctx_.tx.getFlags(); auto const holderID = ctx_.tx[~sfHolder]; + auto const domainID = ctx_.tx[~sfDomainID]; std::shared_ptr sle; if (holderID) @@ -147,6 +183,24 @@ MPTokenIssuanceSet::doApply() if (flagsIn != flagsOut) sle->setFieldU32(sfFlags, flagsOut); + if (domainID) + { + // This is enforced in preflight. 
+ XRPL_ASSERT( + sle->getType() == ltMPTOKEN_ISSUANCE, + "MPTokenIssuanceSet::doApply : modifying MPTokenIssuance"); + + if (*domainID != beast::zero) + { + sle->setFieldH256(sfDomainID, *domainID); + } + else + { + if (sle->isFieldPresent(sfDomainID)) + sle->makeFieldAbsent(sfDomainID); + } + } + view().update(sle); return tesSUCCESS; From 60e340d35628640a14ced0fdbbeef43393625353 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> Date: Wed, 23 Jul 2025 10:53:18 -0700 Subject: [PATCH 2/7] Include `network_id` in validations and subscription stream responses (#5579) This change includes `network_id` data in the validations and ledger subscription stream responses, as well as unit tests to validate the response fields. Fixes #4783 --- src/test/rpc/Subscribe_test.cpp | 15 +++++++++++++-- src/xrpld/app/misc/NetworkOPs.cpp | 4 ++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index 989afc0acc..e0db79bf53 100644 --- a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -131,6 +131,9 @@ public: BEAST_EXPECT(jv.isMember(jss::id) && jv[jss::id] == 5); } BEAST_EXPECT(jv[jss::result][jss::ledger_index] == 2); + BEAST_EXPECT( + jv[jss::result][jss::network_id] == + env.app().config().NETWORK_ID); } { @@ -139,7 +142,8 @@ public: // Check stream update BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { - return jv[jss::ledger_index] == 3; + return jv[jss::ledger_index] == 3 && + jv[jss::network_id] == env.app().config().NETWORK_ID; })); } @@ -149,7 +153,8 @@ public: // Check stream update BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) { - return jv[jss::ledger_index] == 4; + return jv[jss::ledger_index] == 4 && + jv[jss::network_id] == env.app().config().NETWORK_ID; })); } @@ -509,6 +514,11 @@ public: if (!jv.isMember(jss::validated_hash)) return false; + uint32_t netID = env.app().config().NETWORK_ID; + if (!jv.isMember(jss::network_id) || + jv[jss::network_id] != netID) + return false; + // Certain fields are only added on a flag ledger. 
bool const isFlagLedger = (env.closed()->info().seq + 1) % 256 == 0; @@ -567,6 +577,7 @@ public: jv[jss::streams][0u] = "ledger"; jr = env.rpc("json", "subscribe", to_string(jv))[jss::result]; BEAST_EXPECT(jr[jss::status] == "success"); + BEAST_EXPECT(jr[jss::network_id] == env.app().config().NETWORK_ID); jr = env.rpc("json", "unsubscribe", to_string(jv))[jss::result]; BEAST_EXPECT(jr[jss::status] == "success"); diff --git a/src/xrpld/app/misc/NetworkOPs.cpp b/src/xrpld/app/misc/NetworkOPs.cpp index 1ac42579ba..3220ce99fc 100644 --- a/src/xrpld/app/misc/NetworkOPs.cpp +++ b/src/xrpld/app/misc/NetworkOPs.cpp @@ -2415,6 +2415,7 @@ NetworkOPsImp::pubValidation(std::shared_ptr const& val) jvObj[jss::flags] = val->getFlags(); jvObj[jss::signing_time] = *(*val)[~sfSigningTime]; jvObj[jss::data] = strHex(val->getSerializer().slice()); + jvObj[jss::network_id] = app_.config().NETWORK_ID; if (auto version = (*val)[~sfServerVersion]) jvObj[jss::server_version] = std::to_string(*version); @@ -3119,6 +3120,8 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) jvObj[jss::ledger_time] = Json::Value::UInt( lpAccepted->info().closeTime.time_since_epoch().count()); + jvObj[jss::network_id] = app_.config().NETWORK_ID; + if (!lpAccepted->rules().enabled(featureXRPFees)) jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED; jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped(); @@ -4177,6 +4180,7 @@ NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult) jvResult[jss::reserve_base] = lpClosed->fees().accountReserve(0).jsonClipped(); jvResult[jss::reserve_inc] = lpClosed->fees().increment.jsonClipped(); + jvResult[jss::network_id] = app_.config().NETWORK_ID; } if ((mMode >= OperatingMode::SYNCING) && !isNeedNetworkLedger()) From 5713f9782ab2ccf59db37d31e14a600fb9466556 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 24 Jul 2025 11:35:47 +0100 Subject: [PATCH 3/7] chore: Rename conan profile to `default` (#5599) This change renames the `libxrpl` profile to `default` to make it more usable. 
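
With the profile named `default`, Conan picks it up automatically, which is why the
`core:default_profile`/`core:default_build_profile` overrides are dropped from the
workflow `global.conf` snippets in this change. As an illustrative sketch (not part
of the change itself), installing and inspecting the profile locally mirrors the
commands the CI workflows already use:

```
# Copy the repository profiles into the local Conan 2 home and confirm that
# the "default" profile now resolves without any extra global.conf settings.
conan config install conan/profiles/ -tf $(conan config home)/profiles/
conan profile show
```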
--- .github/workflows/macos.yml | 2 -- .github/workflows/nix.yml | 2 -- .github/workflows/windows.yml | 2 -- conan/profiles/{libxrpl => default} | 18 ++++++++++++++++++ conanfile.py | 2 -- 5 files changed, 18 insertions(+), 8 deletions(-) rename conan/profiles/{libxrpl => default} (50%) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 8acd90eeff..3c47a8bd53 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -24,8 +24,6 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} - core:default_build_profile=libxrpl - core:default_profile=libxrpl tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} tools.build:verbosity=verbose tools.compilation:verbosity=verbose diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 8218dcc276..9ff96035b2 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -25,8 +25,6 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{ os.cpu_count() }} core.upload:parallel={{ os.cpu_count() }} - core:default_build_profile=libxrpl - core:default_profile=libxrpl tools.build:jobs={{ (os.cpu_count() * 4/5) | int }} tools.build:verbosity=verbose tools.compilation:verbosity=verbose diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 254850f26a..1479c47600 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -27,8 +27,6 @@ env: CONAN_GLOBAL_CONF: | core.download:parallel={{os.cpu_count()}} core.upload:parallel={{os.cpu_count()}} - core:default_build_profile=libxrpl - core:default_profile=libxrpl tools.build:jobs=24 tools.build:verbosity=verbose tools.compilation:verbosity=verbose diff --git a/conan/profiles/libxrpl b/conan/profiles/default similarity index 50% rename from conan/profiles/libxrpl rename to conan/profiles/default index b037b8c4a2..0417704f8a 100644 --- a/conan/profiles/libxrpl +++ b/conan/profiles/default @@ -9,6 +9,7 @@ [settings] os={{ os }} arch={{ arch }} +build_type=Debug compiler={{compiler}} compiler.version={{ compiler_version }} compiler.cppstd=20 @@ -17,3 +18,20 @@ compiler.runtime=static {% else %} compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}} {% endif %} + +[conf] +{% if compiler == "clang" and compiler_version >= 19 %} +tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] +{% endif %} +{% if compiler == "apple-clang" and compiler_version >= 17 %} +tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] +{% endif %} +{% if compiler == "clang" and compiler_version == 16 %} +tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] +{% endif %} +{% if compiler == "gcc" and compiler_version < 13 %} +tools.build:cxxflags=['-Wno-restrict'] +{% endif %} + +[tool_requires] +!cmake/*: cmake/[>=3 <4] diff --git a/conanfile.py b/conanfile.py index d79b47bc6f..bb65969288 100644 --- a/conanfile.py +++ b/conanfile.py @@ -143,8 +143,6 @@ class Xrpl(ConanFile): tc.variables['static'] = self.options.static tc.variables['unity'] = self.options.unity tc.variables['xrpld'] = self.options.xrpld - if self.settings.compiler == 'clang' and self.settings.compiler.version == 16: - tc.extra_cxxflags = ["-DBOOST_ASIO_DISABLE_CONCEPTS"] tc.generate() def build(self): From b2960b9e7f713ea40a997712105eadb85770f918 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Thu, 24 Jul 2025 14:20:50 +0100 Subject: [PATCH 4/7] Switch instrumentation workflow to use dependencies (#5607) Before `XRPLF/ci` images, we did not 
have a `dependencies:` job for clang-16, so `instrumentation:` had to build its own dependencies. Now we have clang-16 Conan dependencies built in a separate job that can be used. --- .github/workflows/nix.yml | 53 +++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 9ff96035b2..c3560dbab7 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -357,39 +357,44 @@ jobs: cmake --build . ./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+' - # NOTE we are not using dependencies built above because it lags with - # compiler versions. Instrumentation requires clang version 16 or - # later - instrumentation-build: - if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }} - env: - CLANG_RELEASE: 16 + needs: dependencies runs-on: [self-hosted, heavy] container: ghcr.io/xrplf/ci/debian-bookworm:clang-16 - + env: + build_dir: .build steps: + - name: download cache + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 + with: + name: linux-clang-Debug + + - name: extract cache + run: | + mkdir -p ${CONAN_HOME} + tar -xzf conan.tar.gz -C ${CONAN_HOME} + + - name: check environment + run: | + echo ${PATH} | tr ':' '\n' + conan --version + cmake --version + env | sort + ls ${CONAN_HOME} + - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + - name: dependencies + uses: ./.github/actions/dependencies + with: + configuration: Debug + - name: prepare environment run: | - mkdir ${GITHUB_WORKSPACE}/.build - echo "SOURCE_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV - echo "BUILD_DIR=$GITHUB_WORKSPACE/.build" >> $GITHUB_ENV - - - name: configure Conan - run: | - echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf - conan config install conan/profiles/ -tf $(conan config home)/profiles/ - conan profile show - - name: build dependencies - run: | - cd ${BUILD_DIR} - conan install ${SOURCE_DIR} \ - --output-folder ${BUILD_DIR} \ - --build missing \ - --settings:all build_type=Debug + mkdir -p ${build_dir} + echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV + echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV - name: build with instrumentation run: | From 5c2a3a27798d1c17d32c265d3b003ce420082e6a Mon Sep 17 00:00:00 2001 From: Bart Date: Thu, 24 Jul 2025 14:53:14 -0400 Subject: [PATCH 5/7] refactor: Update rocksdb (#5568) This change updates RocksDB to its latest version. RocksDB is backward-compatible, so even though this is a major version bump, databases created with previous versions will continue to function. The external RocksDB folder is removed, as the latest version available via Conan Center no longer needs custom patches. 
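
As a hedged illustration of the resulting workflow (the commands mirror
`.github/actions/dependencies/action.yml` after this change), only the two remaining
local recipes still need to be exported before resolving dependencies; RocksDB itself
now comes from Conan Center:

```
# Local recipes still exported before `conan install` (snappy and soci only);
# rocksdb/10.0.1 is fetched directly from Conan Center, so the old
# `conan export --version 9.7.3 external/rocksdb` step is no longer needed.
conan export --version 1.1.10 external/snappy
conan export --version 4.0.3 external/soci
```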
--- .github/actions/dependencies/action.yml | 3 - .github/workflows/macos.yml | 1 - .github/workflows/nix.yml | 1 - .github/workflows/windows.yml | 1 - BUILD.md | 8 - conanfile.py | 2 +- external/rocksdb/conandata.yml | 12 - external/rocksdb/conanfile.py | 235 ------------- .../patches/9.7.3-0001-memory-leak.patch | 319 ------------------ .../9.x.x-0001-exclude-thirdparty.patch | 30 -- 10 files changed, 1 insertion(+), 611 deletions(-) delete mode 100644 external/rocksdb/conandata.yml delete mode 100644 external/rocksdb/conanfile.py delete mode 100644 external/rocksdb/patches/9.7.3-0001-memory-leak.patch delete mode 100644 external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml index eeb8df105c..7ece9710a8 100644 --- a/.github/actions/dependencies/action.yml +++ b/.github/actions/dependencies/action.yml @@ -10,7 +10,6 @@ runs: shell: bash run: | conan export --version 1.1.10 external/snappy - conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci - name: add Ripple Conan remote if: env.CONAN_URL != '' @@ -22,7 +21,6 @@ runs: fi conan remote add --index 0 ripple "${CONAN_URL}" echo "Added conan remote ripple at ${CONAN_URL}" - - name: try to authenticate to Ripple Conan remote if: env.CONAN_LOGIN_USERNAME_RIPPLE != '' && env.CONAN_PASSWORD_RIPPLE != '' id: remote @@ -31,7 +29,6 @@ runs: echo "Authenticating to ripple remote..." conan remote auth ripple --force conan remote list-users - - name: list missing binaries id: binaries shell: bash diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 3c47a8bd53..adea15af9e 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -94,7 +94,6 @@ jobs: shell: bash run: | conan export --version 1.1.10 external/snappy - conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci - name: add Ripple Conan remote if: env.CONAN_URL != '' diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index c3560dbab7..d6490e4caa 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -99,7 +99,6 @@ jobs: run: tar -czf conan.tar.gz -C ${CONAN_HOME} . - name: build dependencies uses: ./.github/actions/dependencies - with: configuration: ${{ matrix.configuration }} - name: upload archive diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 1479c47600..84a91bcb4e 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -89,7 +89,6 @@ jobs: shell: bash run: | conan export --version 1.1.10 external/snappy - conan export --version 9.7.3 external/rocksdb conan export --version 4.0.3 external/soci - name: add Ripple Conan remote if: env.CONAN_URL != '' diff --git a/BUILD.md b/BUILD.md index fba238d2bc..8e2d5ffb2b 100644 --- a/BUILD.md +++ b/BUILD.md @@ -171,14 +171,6 @@ which allows you to statically link it with GCC, if you want. conan export --version 1.1.10 external/snappy ``` -Export our [Conan recipe for RocksDB](./external/rocksdb). -It does not override paths to dependencies when building with Visual Studio. - - ``` - # Conan 2.x - conan export --version 9.7.3 external/rocksdb - ``` - Export our [Conan recipe for SOCI](./external/soci). It patches their CMake to correctly import its dependencies. 
diff --git a/conanfile.py b/conanfile.py index bb65969288..bad4dfd111 100644 --- a/conanfile.py +++ b/conanfile.py @@ -112,7 +112,7 @@ class Xrpl(ConanFile): if self.options.jemalloc: self.requires('jemalloc/5.3.0') if self.options.rocksdb: - self.requires('rocksdb/9.7.3') + self.requires('rocksdb/10.0.1') self.requires('xxhash/0.8.3', **transitive_headers_opt) exports_sources = ( diff --git a/external/rocksdb/conandata.yml b/external/rocksdb/conandata.yml deleted file mode 100644 index 7d7a575d98..0000000000 --- a/external/rocksdb/conandata.yml +++ /dev/null @@ -1,12 +0,0 @@ -sources: - "9.7.3": - url: "https://github.com/facebook/rocksdb/archive/refs/tags/v9.7.3.tar.gz" - sha256: "acfabb989cbfb5b5c4d23214819b059638193ec33dad2d88373c46448d16d38b" -patches: - "9.7.3": - - patch_file: "patches/9.x.x-0001-exclude-thirdparty.patch" - patch_description: "Do not include thirdparty.inc" - patch_type: "portability" - - patch_file: "patches/9.7.3-0001-memory-leak.patch" - patch_description: "Fix a leak of obsolete blob files left open until DB::Close()" - patch_type: "portability" diff --git a/external/rocksdb/conanfile.py b/external/rocksdb/conanfile.py deleted file mode 100644 index 8b85ce1540..0000000000 --- a/external/rocksdb/conanfile.py +++ /dev/null @@ -1,235 +0,0 @@ -import os -import glob -import shutil - -from conan import ConanFile -from conan.errors import ConanInvalidConfiguration -from conan.tools.build import check_min_cppstd -from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout -from conan.tools.files import apply_conandata_patches, collect_libs, copy, export_conandata_patches, get, rm, rmdir -from conan.tools.microsoft import check_min_vs, is_msvc, is_msvc_static_runtime -from conan.tools.scm import Version - -required_conan_version = ">=1.53.0" - - -class RocksDBConan(ConanFile): - name = "rocksdb" - description = "A library that provides an embeddable, persistent key-value store for fast storage" - license = ("GPL-2.0-only", "Apache-2.0") - url = "https://github.com/conan-io/conan-center-index" - homepage = "https://github.com/facebook/rocksdb" - topics = ("database", "leveldb", "facebook", "key-value") - package_type = "library" - settings = "os", "arch", "compiler", "build_type" - options = { - "shared": [True, False], - "fPIC": [True, False], - "lite": [True, False], - "with_gflags": [True, False], - "with_snappy": [True, False], - "with_lz4": [True, False], - "with_zlib": [True, False], - "with_zstd": [True, False], - "with_tbb": [True, False], - "with_jemalloc": [True, False], - "enable_sse": [False, "sse42", "avx2"], - "use_rtti": [True, False], - } - default_options = { - "shared": False, - "fPIC": True, - "lite": False, - "with_snappy": False, - "with_lz4": False, - "with_zlib": False, - "with_zstd": False, - "with_gflags": False, - "with_tbb": False, - "with_jemalloc": False, - "enable_sse": False, - "use_rtti": False, - } - - @property - def _min_cppstd(self): - return "11" if Version(self.version) < "8.8.1" else "17" - - @property - def _compilers_minimum_version(self): - return {} if self._min_cppstd == "11" else { - "apple-clang": "10", - "clang": "7", - "gcc": "7", - "msvc": "191", - "Visual Studio": "15", - } - - def export_sources(self): - export_conandata_patches(self) - - def config_options(self): - if self.settings.os == "Windows": - del self.options.fPIC - if self.settings.arch != "x86_64": - del self.options.with_tbb - if self.settings.build_type == "Debug": - self.options.use_rtti = True # Rtti are used in asserts for debug mode... 
- - def configure(self): - if self.options.shared: - self.options.rm_safe("fPIC") - - def layout(self): - cmake_layout(self, src_folder="src") - - def requirements(self): - if self.options.with_gflags: - self.requires("gflags/2.2.2") - if self.options.with_snappy: - self.requires("snappy/1.1.10") - if self.options.with_lz4: - self.requires("lz4/1.10.0") - if self.options.with_zlib: - self.requires("zlib/[>=1.2.11 <2]") - if self.options.with_zstd: - self.requires("zstd/1.5.6") - if self.options.get_safe("with_tbb"): - self.requires("onetbb/2021.12.0") - if self.options.with_jemalloc: - self.requires("jemalloc/5.3.0") - - def validate(self): - if self.settings.compiler.get_safe("cppstd"): - check_min_cppstd(self, self._min_cppstd) - - minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False) - if minimum_version and Version(self.settings.compiler.version) < minimum_version: - raise ConanInvalidConfiguration( - f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support." - ) - - if self.settings.arch not in ["x86_64", "ppc64le", "ppc64", "mips64", "armv8"]: - raise ConanInvalidConfiguration("Rocksdb requires 64 bits") - - check_min_vs(self, "191") - - if self.version == "6.20.3" and \ - self.settings.os == "Linux" and \ - self.settings.compiler == "gcc" and \ - Version(self.settings.compiler.version) < "5": - raise ConanInvalidConfiguration("Rocksdb 6.20.3 is not compilable with gcc <5.") # See https://github.com/facebook/rocksdb/issues/3522 - - def source(self): - get(self, **self.conan_data["sources"][self.version], strip_root=True) - - def generate(self): - tc = CMakeToolchain(self) - tc.variables["FAIL_ON_WARNINGS"] = False - tc.variables["WITH_TESTS"] = False - tc.variables["WITH_TOOLS"] = False - tc.variables["WITH_CORE_TOOLS"] = False - tc.variables["WITH_BENCHMARK_TOOLS"] = False - tc.variables["WITH_FOLLY_DISTRIBUTED_MUTEX"] = False - if is_msvc(self): - tc.variables["WITH_MD_LIBRARY"] = not is_msvc_static_runtime(self) - tc.variables["ROCKSDB_INSTALL_ON_WINDOWS"] = self.settings.os == "Windows" - tc.variables["ROCKSDB_LITE"] = self.options.lite - tc.variables["WITH_GFLAGS"] = self.options.with_gflags - tc.variables["WITH_SNAPPY"] = self.options.with_snappy - tc.variables["WITH_LZ4"] = self.options.with_lz4 - tc.variables["WITH_ZLIB"] = self.options.with_zlib - tc.variables["WITH_ZSTD"] = self.options.with_zstd - tc.variables["WITH_TBB"] = self.options.get_safe("with_tbb", False) - tc.variables["WITH_JEMALLOC"] = self.options.with_jemalloc - tc.variables["ROCKSDB_BUILD_SHARED"] = self.options.shared - tc.variables["ROCKSDB_LIBRARY_EXPORTS"] = self.settings.os == "Windows" and self.options.shared - tc.variables["ROCKSDB_DLL" ] = self.settings.os == "Windows" and self.options.shared - tc.variables["USE_RTTI"] = self.options.use_rtti - if not bool(self.options.enable_sse): - tc.variables["PORTABLE"] = True - tc.variables["FORCE_SSE42"] = False - elif self.options.enable_sse == "sse42": - tc.variables["PORTABLE"] = True - tc.variables["FORCE_SSE42"] = True - elif self.options.enable_sse == "avx2": - tc.variables["PORTABLE"] = False - tc.variables["FORCE_SSE42"] = False - # not available yet in CCI - tc.variables["WITH_NUMA"] = False - tc.generate() - - deps = CMakeDeps(self) - if self.options.with_jemalloc: - deps.set_property("jemalloc", "cmake_file_name", "JeMalloc") - deps.set_property("jemalloc", "cmake_target_name", "JeMalloc::JeMalloc") - if self.options.with_zstd: - deps.set_property("zstd", "cmake_target_name", "zstd::zstd") 
- deps.generate() - - def build(self): - apply_conandata_patches(self) - cmake = CMake(self) - cmake.configure() - cmake.build() - - def _remove_static_libraries(self): - rm(self, "rocksdb.lib", os.path.join(self.package_folder, "lib")) - for lib in glob.glob(os.path.join(self.package_folder, "lib", "*.a")): - if not lib.endswith(".dll.a"): - os.remove(lib) - - def _remove_cpp_headers(self): - for path in glob.glob(os.path.join(self.package_folder, "include", "rocksdb", "*")): - if path != os.path.join(self.package_folder, "include", "rocksdb", "c.h"): - if os.path.isfile(path): - os.remove(path) - else: - shutil.rmtree(path) - - def package(self): - copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) - copy(self, "LICENSE*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) - cmake = CMake(self) - cmake.install() - if self.options.shared: - self._remove_static_libraries() - self._remove_cpp_headers() # Force stable ABI for shared libraries - rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) - rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig")) - - def package_info(self): - cmake_target = "rocksdb-shared" if self.options.shared else "rocksdb" - self.cpp_info.set_property("cmake_file_name", "RocksDB") - self.cpp_info.set_property("cmake_target_name", f"RocksDB::{cmake_target}") - # TODO: back to global scope in conan v2 once cmake_find_package* generators removed - self.cpp_info.components["librocksdb"].libs = collect_libs(self) - if self.settings.os == "Windows": - self.cpp_info.components["librocksdb"].system_libs = ["shlwapi", "rpcrt4"] - if self.options.shared: - self.cpp_info.components["librocksdb"].defines = ["ROCKSDB_DLL"] - elif self.settings.os in ["Linux", "FreeBSD"]: - self.cpp_info.components["librocksdb"].system_libs = ["pthread", "m"] - if self.options.lite: - self.cpp_info.components["librocksdb"].defines.append("ROCKSDB_LITE") - - # TODO: to remove in conan v2 once cmake_find_package* generators removed - self.cpp_info.names["cmake_find_package"] = "RocksDB" - self.cpp_info.names["cmake_find_package_multi"] = "RocksDB" - self.cpp_info.components["librocksdb"].names["cmake_find_package"] = cmake_target - self.cpp_info.components["librocksdb"].names["cmake_find_package_multi"] = cmake_target - self.cpp_info.components["librocksdb"].set_property("cmake_target_name", f"RocksDB::{cmake_target}") - if self.options.with_gflags: - self.cpp_info.components["librocksdb"].requires.append("gflags::gflags") - if self.options.with_snappy: - self.cpp_info.components["librocksdb"].requires.append("snappy::snappy") - if self.options.with_lz4: - self.cpp_info.components["librocksdb"].requires.append("lz4::lz4") - if self.options.with_zlib: - self.cpp_info.components["librocksdb"].requires.append("zlib::zlib") - if self.options.with_zstd: - self.cpp_info.components["librocksdb"].requires.append("zstd::zstd") - if self.options.get_safe("with_tbb"): - self.cpp_info.components["librocksdb"].requires.append("onetbb::onetbb") - if self.options.with_jemalloc: - self.cpp_info.components["librocksdb"].requires.append("jemalloc::jemalloc") diff --git a/external/rocksdb/patches/9.7.3-0001-memory-leak.patch b/external/rocksdb/patches/9.7.3-0001-memory-leak.patch deleted file mode 100644 index bb086e6cb2..0000000000 --- a/external/rocksdb/patches/9.7.3-0001-memory-leak.patch +++ /dev/null @@ -1,319 +0,0 @@ -diff --git a/HISTORY.md b/HISTORY.md -index 36d472229..05ad1a202 100644 ---- a/HISTORY.md -+++ 
b/HISTORY.md -@@ -1,6 +1,10 @@ - # Rocksdb Change Log - > NOTE: Entries for next release do not go here. Follow instructions in `unreleased_history/README.txt` - -+## 9.7.4 (10/31/2024) -+### Bug Fixes -+* Fix a leak of obsolete blob files left open until DB::Close(). This bug was introduced in version 9.4.0. -+ - ## 9.7.3 (10/16/2024) - ### Behavior Changes - * OPTIONS file to be loaded by remote worker is now preserved so that it does not get purged by the primary host. A similar technique as how we are preserving new SST files from getting purged is used for this. min_options_file_numbers_ is tracked like pending_outputs_ is tracked. -diff --git a/db/blob/blob_file_cache.cc b/db/blob/blob_file_cache.cc -index 5f340aadf..1b9faa238 100644 ---- a/db/blob/blob_file_cache.cc -+++ b/db/blob/blob_file_cache.cc -@@ -42,6 +42,7 @@ Status BlobFileCache::GetBlobFileReader( - assert(blob_file_reader); - assert(blob_file_reader->IsEmpty()); - -+ // NOTE: sharing same Cache with table_cache - const Slice key = GetSliceForKey(&blob_file_number); - - assert(cache_); -@@ -98,4 +99,13 @@ Status BlobFileCache::GetBlobFileReader( - return Status::OK(); - } - -+void BlobFileCache::Evict(uint64_t blob_file_number) { -+ // NOTE: sharing same Cache with table_cache -+ const Slice key = GetSliceForKey(&blob_file_number); -+ -+ assert(cache_); -+ -+ cache_.get()->Erase(key); -+} -+ - } // namespace ROCKSDB_NAMESPACE -diff --git a/db/blob/blob_file_cache.h b/db/blob/blob_file_cache.h -index 740e67ada..6858d012b 100644 ---- a/db/blob/blob_file_cache.h -+++ b/db/blob/blob_file_cache.h -@@ -36,6 +36,15 @@ class BlobFileCache { - uint64_t blob_file_number, - CacheHandleGuard* blob_file_reader); - -+ // Called when a blob file is obsolete to ensure it is removed from the cache -+ // to avoid effectively leaking the open file and assicated memory -+ void Evict(uint64_t blob_file_number); -+ -+ // Used to identify cache entries for blob files (not normally useful) -+ static const Cache::CacheItemHelper* GetHelper() { -+ return CacheInterface::GetBasicHelper(); -+ } -+ - private: - using CacheInterface = - BasicTypedCacheInterface; -diff --git a/db/column_family.h b/db/column_family.h -index e4b7adde8..86637736a 100644 ---- a/db/column_family.h -+++ b/db/column_family.h -@@ -401,6 +401,7 @@ class ColumnFamilyData { - SequenceNumber earliest_seq); - - TableCache* table_cache() const { return table_cache_.get(); } -+ BlobFileCache* blob_file_cache() const { return blob_file_cache_.get(); } - BlobSource* blob_source() const { return blob_source_.get(); } - - // See documentation in compaction_picker.h -diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc -index 261593423..06573ac2e 100644 ---- a/db/db_impl/db_impl.cc -+++ b/db/db_impl/db_impl.cc -@@ -659,8 +659,9 @@ Status DBImpl::CloseHelper() { - // We need to release them before the block cache is destroyed. The block - // cache may be destroyed inside versions_.reset(), when column family data - // list is destroyed, so leaving handles in table cache after -- // versions_.reset() may cause issues. -- // Here we clean all unreferenced handles in table cache. -+ // versions_.reset() may cause issues. Here we clean all unreferenced handles -+ // in table cache, and (for certain builds/conditions) assert that no obsolete -+ // files are hanging around unreferenced (leak) in the table/blob file cache. - // Now we assume all user queries have finished, so only version set itself - // can possibly hold the blocks from block cache. 
After releasing unreferenced - // handles here, only handles held by version set left and inside -@@ -668,6 +669,9 @@ Status DBImpl::CloseHelper() { - // time a handle is released, we erase it from the cache too. By doing that, - // we can guarantee that after versions_.reset(), table cache is empty - // so the cache can be safely destroyed. -+#ifndef NDEBUG -+ TEST_VerifyNoObsoleteFilesCached(/*db_mutex_already_held=*/true); -+#endif // !NDEBUG - table_cache_->EraseUnRefEntries(); - - for (auto& txn_entry : recovered_transactions_) { -@@ -3227,6 +3231,8 @@ Status DBImpl::MultiGetImpl( - s = Status::Aborted(); - break; - } -+ // This could be a long-running operation -+ ROCKSDB_THREAD_YIELD_HOOK(); - } - - // Post processing (decrement reference counts and record statistics) -diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h -index 5e4fa310b..ccc0abfa7 100644 ---- a/db/db_impl/db_impl.h -+++ b/db/db_impl/db_impl.h -@@ -1241,9 +1241,14 @@ class DBImpl : public DB { - static Status TEST_ValidateOptions(const DBOptions& db_options) { - return ValidateOptions(db_options); - } -- - #endif // NDEBUG - -+ // In certain configurations, verify that the table/blob file cache only -+ // contains entries for live files, to check for effective leaks of open -+ // files. This can only be called when purging of obsolete files has -+ // "settled," such as during parts of DB Close(). -+ void TEST_VerifyNoObsoleteFilesCached(bool db_mutex_already_held) const; -+ - // persist stats to column family "_persistent_stats" - void PersistStats(); - -diff --git a/db/db_impl/db_impl_debug.cc b/db/db_impl/db_impl_debug.cc -index 790a50d7a..67f5b4aaf 100644 ---- a/db/db_impl/db_impl_debug.cc -+++ b/db/db_impl/db_impl_debug.cc -@@ -9,6 +9,7 @@ - - #ifndef NDEBUG - -+#include "db/blob/blob_file_cache.h" - #include "db/column_family.h" - #include "db/db_impl/db_impl.h" - #include "db/error_handler.h" -@@ -328,5 +329,49 @@ size_t DBImpl::TEST_EstimateInMemoryStatsHistorySize() const { - InstrumentedMutexLock l(&const_cast(this)->stats_history_mutex_); - return EstimateInMemoryStatsHistorySize(); - } -+ -+void DBImpl::TEST_VerifyNoObsoleteFilesCached( -+ bool db_mutex_already_held) const { -+ // This check is somewhat expensive and obscure to make a part of every -+ // unit test in every build variety. Thus, we only enable it for ASAN builds. -+ if (!kMustFreeHeapAllocations) { -+ return; -+ } -+ -+ std::optional l; -+ if (db_mutex_already_held) { -+ mutex_.AssertHeld(); -+ } else { -+ l.emplace(&mutex_); -+ } -+ -+ std::vector live_files; -+ for (auto cfd : *versions_->GetColumnFamilySet()) { -+ if (cfd->IsDropped()) { -+ continue; -+ } -+ // Sneakily add both SST and blob files to the same list -+ cfd->current()->AddLiveFiles(&live_files, &live_files); -+ } -+ std::sort(live_files.begin(), live_files.end()); -+ -+ auto fn = [&live_files](const Slice& key, Cache::ObjectPtr, size_t, -+ const Cache::CacheItemHelper* helper) { -+ if (helper != BlobFileCache::GetHelper()) { -+ // Skip non-blob files for now -+ // FIXME: diagnose and fix the leaks of obsolete SST files revealed in -+ // unit tests. 
-+ return; -+ } -+ // See TableCache and BlobFileCache -+ assert(key.size() == sizeof(uint64_t)); -+ uint64_t file_number; -+ GetUnaligned(reinterpret_cast(key.data()), &file_number); -+ // Assert file is in sorted live_files -+ assert( -+ std::binary_search(live_files.begin(), live_files.end(), file_number)); -+ }; -+ table_cache_->ApplyToAllEntries(fn, {}); -+} - } // namespace ROCKSDB_NAMESPACE - #endif // NDEBUG -diff --git a/db/db_iter.cc b/db/db_iter.cc -index e02586377..bf4749eb9 100644 ---- a/db/db_iter.cc -+++ b/db/db_iter.cc -@@ -540,6 +540,8 @@ bool DBIter::FindNextUserEntryInternal(bool skipping_saved_key, - } else { - iter_.Next(); - } -+ // This could be a long-running operation due to tombstones, etc. -+ ROCKSDB_THREAD_YIELD_HOOK(); - } while (iter_.Valid()); - - valid_ = false; -diff --git a/db/table_cache.cc b/db/table_cache.cc -index 71fc29c32..8a5be75e8 100644 ---- a/db/table_cache.cc -+++ b/db/table_cache.cc -@@ -164,6 +164,7 @@ Status TableCache::GetTableReader( - } - - Cache::Handle* TableCache::Lookup(Cache* cache, uint64_t file_number) { -+ // NOTE: sharing same Cache with BlobFileCache - Slice key = GetSliceForFileNumber(&file_number); - return cache->Lookup(key); - } -@@ -179,6 +180,7 @@ Status TableCache::FindTable( - size_t max_file_size_for_l0_meta_pin, Temperature file_temperature) { - PERF_TIMER_GUARD_WITH_CLOCK(find_table_nanos, ioptions_.clock); - uint64_t number = file_meta.fd.GetNumber(); -+ // NOTE: sharing same Cache with BlobFileCache - Slice key = GetSliceForFileNumber(&number); - *handle = cache_.Lookup(key); - TEST_SYNC_POINT_CALLBACK("TableCache::FindTable:0", -diff --git a/db/version_builder.cc b/db/version_builder.cc -index ed8ab8214..c98f53f42 100644 ---- a/db/version_builder.cc -+++ b/db/version_builder.cc -@@ -24,6 +24,7 @@ - #include - - #include "cache/cache_reservation_manager.h" -+#include "db/blob/blob_file_cache.h" - #include "db/blob/blob_file_meta.h" - #include "db/dbformat.h" - #include "db/internal_stats.h" -@@ -744,12 +745,9 @@ class VersionBuilder::Rep { - return Status::Corruption("VersionBuilder", oss.str()); - } - -- // Note: we use C++11 for now but in C++14, this could be done in a more -- // elegant way using generalized lambda capture. -- VersionSet* const vs = version_set_; -- const ImmutableCFOptions* const ioptions = ioptions_; -- -- auto deleter = [vs, ioptions](SharedBlobFileMetaData* shared_meta) { -+ auto deleter = [vs = version_set_, ioptions = ioptions_, -+ bc = cfd_ ? 
cfd_->blob_file_cache() -+ : nullptr](SharedBlobFileMetaData* shared_meta) { - if (vs) { - assert(ioptions); - assert(!ioptions->cf_paths.empty()); -@@ -758,6 +756,9 @@ class VersionBuilder::Rep { - vs->AddObsoleteBlobFile(shared_meta->GetBlobFileNumber(), - ioptions->cf_paths.front().path); - } -+ if (bc) { -+ bc->Evict(shared_meta->GetBlobFileNumber()); -+ } - - delete shared_meta; - }; -@@ -766,7 +767,7 @@ class VersionBuilder::Rep { - blob_file_number, blob_file_addition.GetTotalBlobCount(), - blob_file_addition.GetTotalBlobBytes(), - blob_file_addition.GetChecksumMethod(), -- blob_file_addition.GetChecksumValue(), deleter); -+ blob_file_addition.GetChecksumValue(), std::move(deleter)); - - mutable_blob_file_metas_.emplace( - blob_file_number, MutableBlobFileMetaData(std::move(shared_meta))); -diff --git a/db/version_set.h b/db/version_set.h -index 9336782b1..024f869e7 100644 ---- a/db/version_set.h -+++ b/db/version_set.h -@@ -1514,7 +1514,6 @@ class VersionSet { - void GetLiveFilesMetaData(std::vector* metadata); - - void AddObsoleteBlobFile(uint64_t blob_file_number, std::string path) { -- // TODO: Erase file from BlobFileCache? - obsolete_blob_files_.emplace_back(blob_file_number, std::move(path)); - } - -diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h -index 2a19796b8..0afa2cab1 100644 ---- a/include/rocksdb/version.h -+++ b/include/rocksdb/version.h -@@ -13,7 +13,7 @@ - // minor or major version number planned for release. - #define ROCKSDB_MAJOR 9 - #define ROCKSDB_MINOR 7 --#define ROCKSDB_PATCH 3 -+#define ROCKSDB_PATCH 4 - - // Do not use these. We made the mistake of declaring macros starting with - // double underscore. Now we have to live with our choice. We'll deprecate these -diff --git a/port/port.h b/port/port.h -index 13aa56d47..141716e5b 100644 ---- a/port/port.h -+++ b/port/port.h -@@ -19,3 +19,19 @@ - #elif defined(OS_WIN) - #include "port/win/port_win.h" - #endif -+ -+#ifdef OS_LINUX -+// A temporary hook into long-running RocksDB threads to support modifying their -+// priority etc. This should become a public API hook once the requirements -+// are better understood. -+extern "C" void RocksDbThreadYield() __attribute__((__weak__)); -+#define ROCKSDB_THREAD_YIELD_HOOK() \ -+ { \ -+ if (RocksDbThreadYield) { \ -+ RocksDbThreadYield(); \ -+ } \ -+ } -+#else -+#define ROCKSDB_THREAD_YIELD_HOOK() \ -+ {} -+#endif diff --git a/external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch b/external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch deleted file mode 100644 index 7b5858bc1e..0000000000 --- a/external/rocksdb/patches/9.x.x-0001-exclude-thirdparty.patch +++ /dev/null @@ -1,30 +0,0 @@ -diff --git a/CMakeLists.txt b/CMakeLists.txt -index 93b884d..b715cb6 100644 ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -106,14 +106,9 @@ endif() - include(CMakeDependentOption) - - if(MSVC) -- option(WITH_GFLAGS "build with GFlags" OFF) - option(WITH_XPRESS "build with windows built in compression" OFF) -- option(ROCKSDB_SKIP_THIRDPARTY "skip thirdparty.inc" OFF) -- -- if(NOT ROCKSDB_SKIP_THIRDPARTY) -- include(${CMAKE_CURRENT_SOURCE_DIR}/thirdparty.inc) -- endif() --else() -+endif() -+if(TRUE) - if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD") - # FreeBSD has jemalloc as default malloc - # but it does not have all the jemalloc files in include/... 
-@@ -126,7 +121,7 @@ else() - endif() - endif() - -- if(MINGW) -+ if(MSVC OR MINGW) - option(WITH_GFLAGS "build with GFlags" OFF) - else() - option(WITH_GFLAGS "build with GFlags" ON) From e7a7bb83c1a3916095274b6ea1095f3b50722333 Mon Sep 17 00:00:00 2001 From: Bronek Kozicki Date: Fri, 25 Jul 2025 14:53:25 +0100 Subject: [PATCH 6/7] VaultWithdraw destination account bugfix (#5572) #5224 added (among other things) a `VaultWithdraw` transaction that allows setting the recipient of the withdrawn funds in the `Destination` transaction field. This technically turns this transaction into a payment, and in some respect the implementation does follow payment rules, e.g. enforcement of `lsfRequireDestTag` or `lsfDepositAuth`, or that MPT transfer has destination `MPToken`. However for IOUs, it missed verification that the destination account has a trust line to the asset issuer. Since the default behavior of `accountSendIOU` is to create this trust line (if missing), this is what `VaultWithdraw` currently does. This is incorrect, since the `Destination` might not be interested in holding the asset in question; this basically enables spammy transfers. This change, therefore, removes automatic creation of a trust line to the `Destination` account in `VaultWithdraw`. --- .../xrpl/protocol/detail/transactions.macro | 1 + src/test/app/Vault_test.cpp | 205 +++++++++++++++++- src/xrpld/app/tx/detail/Escrow.cpp | 8 +- src/xrpld/app/tx/detail/VaultWithdraw.cpp | 36 ++- src/xrpld/ledger/View.h | 81 +++++-- src/xrpld/ledger/detail/View.cpp | 29 ++- 6 files changed, 315 insertions(+), 45 deletions(-) diff --git a/include/xrpl/protocol/detail/transactions.macro b/include/xrpl/protocol/detail/transactions.macro index 38665296cd..89e9a16df5 100644 --- a/include/xrpl/protocol/detail/transactions.macro +++ b/include/xrpl/protocol/detail/transactions.macro @@ -509,6 +509,7 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, Delegation::delegatable, ({ {sfVaultID, soeREQUIRED}, {sfAmount, soeREQUIRED, soeMPTSupported}, {sfDestination, soeOPTIONAL}, + {sfDestinationTag, soeOPTIONAL}, })) /** This transaction claws back tokens from a vault. */ diff --git a/src/test/app/Vault_test.cpp b/src/test/app/Vault_test.cpp index ce97eff24f..f9036719cd 100644 --- a/src/test/app/Vault_test.cpp +++ b/src/test/app/Vault_test.cpp @@ -234,6 +234,28 @@ class Vault_test : public beast::unit_test::suite env(tx, ter{tecNO_PERMISSION}); } + { + testcase(prefix + " fail to withdraw to zero destination"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + tx[sfDestination] = "0"; + env(tx, ter(temMALFORMED)); + } + + { + testcase( + prefix + + " fail to withdraw with tag but without destination"); + auto tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + tx[sfDestinationTag] = "0"; + env(tx, ter(temMALFORMED)); + } + if (!asset.raw().native()) { testcase( @@ -1335,6 +1357,7 @@ class Vault_test : public beast::unit_test::suite struct CaseArgs { bool enableClawback = true; + bool requireAuth = true; }; auto testCase = [this]( @@ -1356,16 +1379,20 @@ class Vault_test : public beast::unit_test::suite Vault vault{env}; MPTTester mptt{env, issuer, mptInitNoFund}; + auto const none = LedgerSpecificFlags(0); mptt.create( {.flags = tfMPTCanTransfer | tfMPTCanLock | - (args.enableClawback ? lsfMPTCanClawback - : LedgerSpecificFlags(0)) | - tfMPTRequireAuth}); + (args.enableClawback ? tfMPTCanClawback : none) | + (args.requireAuth ? 
tfMPTRequireAuth : none)}); PrettyAsset asset = mptt.issuanceID(); mptt.authorize({.account = owner}); - mptt.authorize({.account = issuer, .holder = owner}); mptt.authorize({.account = depositor}); - mptt.authorize({.account = issuer, .holder = depositor}); + if (args.requireAuth) + { + mptt.authorize({.account = issuer, .holder = owner}); + mptt.authorize({.account = issuer, .holder = depositor}); + } + env(pay(issuer, depositor, asset(1000))); env.close(); @@ -1514,6 +1541,100 @@ class Vault_test : public beast::unit_test::suite } }); + testCase( + [this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase( + "MPT 3rd party without MPToken cannot be withdrawal " + "destination"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + env.close(); + + { + // Set destination to 3rd party without MPToken + Account charlie{"charlie"}; + env.fund(XRP(1000), charlie); + env.close(); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + tx[sfDestination] = charlie.human(); + env(tx, ter(tecNO_AUTH)); + } + }, + {.requireAuth = false}); + + testCase( + [this]( + Env& env, + Account const& issuer, + Account const& owner, + Account const& depositor, + PrettyAsset const& asset, + Vault& vault, + MPTTester& mptt) { + testcase("MPT depositor without MPToken cannot withdraw"); + + auto [tx, keylet] = + vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + tx = vault.deposit( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(1000)}); + env(tx); + env.close(); + + { + // Remove depositor's MPToken and withdraw will fail + mptt.authorize( + {.account = depositor, .flags = tfMPTUnauthorize}); + env.close(); + auto const mptoken = + env.le(keylet::mptoken(mptt.issuanceID(), depositor)); + BEAST_EXPECT(mptoken == nullptr); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx, ter(tecNO_AUTH)); + } + + { + // Restore depositor's MPToken and withdraw will succeed + mptt.authorize({.account = depositor}); + env.close(); + + tx = vault.withdraw( + {.depositor = depositor, + .id = keylet.key, + .amount = asset(100)}); + env(tx); + } + }, + {.requireAuth = false}); + testCase([this]( Env& env, Account const& issuer, @@ -1803,6 +1924,7 @@ class Vault_test : public beast::unit_test::suite PrettyAsset const asset = issuer["IOU"]; env.trust(asset(1000), owner); + env.trust(asset(1000), charlie); env(pay(issuer, owner, asset(200))); env(rate(issuer, 1.25)); env.close(); @@ -2118,6 +2240,79 @@ class Vault_test : public beast::unit_test::suite env.close(); }); + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto&&...) 
{ + testcase("IOU no trust line to 3rd party"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(100)})); + env.close(); + + Account const erin{"erin"}; + env.fund(XRP(1000), erin); + env.close(); + + // Withdraw to 3rd party without trust line + auto const tx1 = [&](ripple::Keylet keylet) { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + tx[sfDestination] = erin.human(); + return tx; + }(keylet); + env(tx1, ter{tecNO_LINE}); + }); + + testCase([&, this]( + Env& env, + Account const& owner, + Account const& issuer, + Account const& charlie, + auto, + Vault& vault, + PrettyAsset const& asset, + auto&&...) { + testcase("IOU no trust line to depositor"); + + auto [tx, keylet] = vault.create({.owner = owner, .asset = asset}); + env(tx); + env.close(); + + // reset limit, so deposit of all funds will delete the trust line + env.trust(asset(0), owner); + env.close(); + + env(vault.deposit( + {.depositor = owner, .id = keylet.key, .amount = asset(200)})); + env.close(); + + auto trustline = + env.le(keylet::line(owner, asset.raw().get())); + BEAST_EXPECT(trustline == nullptr); + + // Withdraw without trust line, will succeed + auto const tx1 = [&](ripple::Keylet keylet) { + auto tx = vault.withdraw( + {.depositor = owner, + .id = keylet.key, + .amount = asset(10)}); + return tx; + }(keylet); + env(tx1); + }); + testCase([&, this]( Env& env, Account const& owner, diff --git a/src/xrpld/app/tx/detail/Escrow.cpp b/src/xrpld/app/tx/detail/Escrow.cpp index c62c38c675..dd0ffac778 100644 --- a/src/xrpld/app/tx/detail/Escrow.cpp +++ b/src/xrpld/app/tx/detail/Escrow.cpp @@ -315,14 +315,14 @@ escrowCreatePreclaimHelper( // authorized auto const& mptIssue = amount.get(); if (auto const ter = - requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, account, AuthType::WeakAuth); ter != tesSUCCESS) return ter; // If the issuer has requireAuth set, check if the destination is // authorized if (auto const ter = - requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, dest, AuthType::WeakAuth); ter != tesSUCCESS) return ter; @@ -746,7 +746,7 @@ escrowFinishPreclaimHelper( // authorized auto const& mptIssue = amount.get(); if (auto const ter = - requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, dest, AuthType::WeakAuth); ter != tesSUCCESS) return ter; @@ -1259,7 +1259,7 @@ escrowCancelPreclaimHelper( // authorized auto const& mptIssue = amount.get(); if (auto const ter = - requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth); + requireAuth(ctx.view, mptIssue, account, AuthType::WeakAuth); ter != tesSUCCESS) return ter; diff --git a/src/xrpld/app/tx/detail/VaultWithdraw.cpp b/src/xrpld/app/tx/detail/VaultWithdraw.cpp index 7a8605cdbd..09a9fd14e1 100644 --- a/src/xrpld/app/tx/detail/VaultWithdraw.cpp +++ b/src/xrpld/app/tx/detail/VaultWithdraw.cpp @@ -52,9 +52,19 @@ VaultWithdraw::preflight(PreflightContext const& ctx) return temBAD_AMOUNT; if (auto const destination = ctx.tx[~sfDestination]; - destination && *destination == beast::zero) + destination.has_value()) { - JLOG(ctx.j.debug()) << "VaultWithdraw: zero/empty destination account."; + if (*destination == beast::zero) + { + JLOG(ctx.j.debug()) + << "VaultWithdraw: zero/empty destination account."; + return temMALFORMED; + } + } + else if 
(ctx.tx.isFieldPresent(sfDestinationTag)) + { + JLOG(ctx.j.debug()) << "VaultWithdraw: sfDestinationTag is set but " + "sfDestination is not"; return temMALFORMED; } @@ -123,33 +133,39 @@ VaultWithdraw::preclaim(PreclaimContext const& ctx) // Withdrawal to a 3rd party destination account is essentially a transfer, // via shares in the vault. Enforce all the usual asset transfer checks. + AuthType authType = AuthType::Legacy; if (account != dstAcct) { auto const sleDst = ctx.view.read(keylet::account(dstAcct)); if (sleDst == nullptr) return tecNO_DST; - if (sleDst->getFlags() & lsfRequireDestTag) + if (sleDst->isFlag(lsfRequireDestTag) && + !ctx.tx.isFieldPresent(sfDestinationTag)) return tecDST_TAG_NEEDED; // Cannot send without a tag - if (sleDst->getFlags() & lsfDepositAuth) + if (sleDst->isFlag(lsfDepositAuth)) { if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account))) return tecNO_PERMISSION; } + // The destination account must have consented to receive the asset by + // creating a RippleState or MPToken + authType = AuthType::StrongAuth; } - // Destination MPToken must exist (if asset is an MPT) - if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct); + // Destination MPToken (for an MPT) or trust line (for an IOU) must exist + // if not sending to Account. + if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct, authType); !isTesSuccess(ter)) return ter; // Cannot withdraw from a Vault an Asset frozen for the destination account - if (isFrozen(ctx.view, dstAcct, vaultAsset)) - return vaultAsset.holds() ? tecFROZEN : tecLOCKED; + if (auto const ret = checkFrozen(ctx.view, dstAcct, vaultAsset)) + return ret; - if (isFrozen(ctx.view, account, vaultShare)) - return tecLOCKED; + if (auto const ret = checkFrozen(ctx.view, account, vaultShare)) + return ret; return tesSUCCESS; } diff --git a/src/xrpld/ledger/View.h b/src/xrpld/ledger/View.h index 8c391499b6..fc9360734d 100644 --- a/src/xrpld/ledger/View.h +++ b/src/xrpld/ledger/View.h @@ -175,6 +175,29 @@ isFrozen( asset.value()); } +[[nodiscard]] inline TER +checkFrozen(ReadView const& view, AccountID const& account, Issue const& issue) +{ + return isFrozen(view, account, issue) ? (TER)tecFROZEN : (TER)tesSUCCESS; +} + +[[nodiscard]] inline TER +checkFrozen( + ReadView const& view, + AccountID const& account, + MPTIssue const& mptIssue) +{ + return isFrozen(view, account, mptIssue) ? (TER)tecLOCKED : (TER)tesSUCCESS; +} + +[[nodiscard]] inline TER +checkFrozen(ReadView const& view, AccountID const& account, Asset const& asset) +{ + return std::visit( + [&](auto const& issue) { return checkFrozen(view, account, issue); }, + asset.value()); +} + [[nodiscard]] bool isAnyFrozen( ReadView const& view, @@ -725,19 +748,40 @@ transferXRP( STAmount const& amount, beast::Journal j); -/* Check if MPToken exists: - * - StrongAuth - before checking lsfMPTRequireAuth is set - * - WeakAuth - after checking if lsfMPTRequireAuth is set +/* Check if MPToken (for MPT) or trust line (for IOU) exists: + * - StrongAuth - before checking if authorization is required + * - WeakAuth + * for MPT - after checking lsfMPTRequireAuth flag + * for IOU - do not check if trust line exists + * - Legacy + * for MPT - before checking lsfMPTRequireAuth flag i.e. same as StrongAuth + * for IOU - do not check if trust line exists i.e. same as WeakAuth */ -enum class MPTAuthType : bool { StrongAuth = true, WeakAuth = false }; +enum class AuthType { StrongAuth, WeakAuth, Legacy }; /** Check if the account lacks required authorization. 
* - * Return tecNO_AUTH or tecNO_LINE if it does - and tesSUCCESS otherwise. + * Return tecNO_AUTH or tecNO_LINE if it does + * and tesSUCCESS otherwise. + * + * If StrongAuth then return tecNO_LINE if the RippleState doesn't exist. Return + * tecNO_AUTH if lsfRequireAuth is set on the issuer's AccountRoot, and the + * RippleState does exist, and the RippleState is not authorized. + * + * If WeakAuth then return tecNO_AUTH if lsfRequireAuth is set, and the + * RippleState exists, and is not authorized. Return tecNO_LINE if + * lsfRequireAuth is set and the RippleState doesn't exist. Consequently, if + * WeakAuth and lsfRequireAuth is *not* set, this function will return + * tesSUCCESS even if RippleState does *not* exist. + * + * The default "Legacy" auth type is equivalent to WeakAuth. */ [[nodiscard]] TER -requireAuth(ReadView const& view, Issue const& issue, AccountID const& account); +requireAuth( + ReadView const& view, + Issue const& issue, + AccountID const& account, + AuthType authType = AuthType::Legacy); /** Check if the account lacks required authorization. * @@ -751,32 +795,33 @@ requireAuth(ReadView const& view, Issue const& issue, AccountID const& account); * purely defensive, as we currently do not allow such vaults to be created. * * If StrongAuth then return tecNO_AUTH if MPToken doesn't exist or - lsfMPTRequireAuth is set and MPToken is not authorized. If WeakAuth then - return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken doesn't exist or is - not authorized (explicitly or via credentials, if DomainID is set in - MPTokenIssuance). Consequently, if WeakAuth and lsfMPTRequireAuth is *not* - set, this function will return true even if MPToken does *not* exist. + lsfMPTRequireAuth is set and MPToken is not authorized. + * + * If WeakAuth then return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken + * doesn't exist or is not authorized (explicitly or via credentials, if + * DomainID is set in MPTokenIssuance). Consequently, if WeakAuth and + * lsfMPTRequireAuth is *not* set, this function will return tesSUCCESS even if + * MPToken does *not* exist. + * + * The default "Legacy" auth type is equivalent to StrongAuth.
*/ [[nodiscard]] TER requireAuth( ReadView const& view, MPTIssue const& mptIssue, AccountID const& account, - MPTAuthType authType = MPTAuthType::StrongAuth, + AuthType authType = AuthType::Legacy, int depth = 0); [[nodiscard]] TER inline requireAuth( ReadView const& view, Asset const& asset, AccountID const& account, - MPTAuthType authType = MPTAuthType::StrongAuth) + AuthType authType = AuthType::Legacy) { return std::visit( [&](TIss const& issue_) { - if constexpr (std::is_same_v) - return requireAuth(view, issue_, account); - else - return requireAuth(view, issue_, account, authType); + return requireAuth(view, issue_, account, authType); }, asset.value()); } diff --git a/src/xrpld/ledger/detail/View.cpp b/src/xrpld/ledger/detail/View.cpp index cb95819014..1f616ed491 100644 --- a/src/xrpld/ledger/detail/View.cpp +++ b/src/xrpld/ledger/detail/View.cpp @@ -505,8 +505,8 @@ accountHolds( if (zeroIfUnauthorized == ahZERO_IF_UNAUTHORIZED && view.rules().enabled(featureSingleAssetVault)) { - if (auto const err = requireAuth( - view, mptIssue, account, MPTAuthType::StrongAuth); + if (auto const err = + requireAuth(view, mptIssue, account, AuthType::StrongAuth); !isTesSuccess(err)) amount.clear(mptIssue); } @@ -2298,15 +2298,27 @@ transferXRP( } TER -requireAuth(ReadView const& view, Issue const& issue, AccountID const& account) +requireAuth( + ReadView const& view, + Issue const& issue, + AccountID const& account, + AuthType authType) { if (isXRP(issue) || issue.account == account) return tesSUCCESS; + + auto const trustLine = + view.read(keylet::line(account, issue.account, issue.currency)); + // If account has no line, and this is a strong check, fail + if (!trustLine && authType == AuthType::StrongAuth) + return tecNO_LINE; + + // If this is a weak or legacy check, or if the account has a line, fail if + // auth is required and not set on the line if (auto const issuerAccount = view.read(keylet::account(issue.account)); issuerAccount && (*issuerAccount)[sfFlags] & lsfRequireAuth) { - if (auto const trustLine = - view.read(keylet::line(account, issue.account, issue.currency))) + if (trustLine) return ((*trustLine)[sfFlags] & ((account > issue.account) ? lsfLowAuth : lsfHighAuth)) ? 
tesSUCCESS @@ -2322,7 +2334,7 @@ requireAuth( ReadView const& view, MPTIssue const& mptIssue, AccountID const& account, - MPTAuthType authType, + AuthType authType, int depth) { auto const mptID = keylet::mptIssuance(mptIssue.getMptID()); @@ -2357,7 +2369,7 @@ requireAuth( if (auto const err = std::visit( [&](TIss const& issue) { if constexpr (std::is_same_v) - return requireAuth(view, issue, account); + return requireAuth(view, issue, account, authType); else return requireAuth( view, issue, account, authType, depth + 1); @@ -2372,7 +2384,8 @@ requireAuth( auto const sleToken = view.read(mptokenID); // if account has no MPToken, fail - if (!sleToken && authType == MPTAuthType::StrongAuth) + if (!sleToken && + (authType == AuthType::StrongAuth || authType == AuthType::Legacy)) return tecNO_AUTH; // Note, this check is not amendment-gated because DomainID will be always From 921aef9934fba3a947fe3d6d38de756abd3a6adc Mon Sep 17 00:00:00 2001 From: Bart Date: Fri, 25 Jul 2025 11:54:02 -0400 Subject: [PATCH 7/7] Updates Conan dependencies: Boost 1.86 (#5264) --- BUILD.md | 57 +-------------------------- conan/profiles/default | 3 -- conanfile.py | 2 +- external/soci/conanfile.py | 4 +- include/xrpl/basics/Log.h | 1 + src/libxrpl/basics/FileUtilities.cpp | 6 ++- src/test/jtx/TrustedPublisherServer.h | 13 +++--- src/test/server/ServerStatus_test.cpp | 2 +- src/test/unit_test/FileDirGuard.h | 2 + src/xrpld/app/main/Application.cpp | 2 +- src/xrpld/app/main/Main.cpp | 2 +- 11 files changed, 21 insertions(+), 73 deletions(-) diff --git a/BUILD.md b/BUILD.md index 8e2d5ffb2b..072e38af93 100644 --- a/BUILD.md +++ b/BUILD.md @@ -370,18 +370,13 @@ and can be helpful for detecting `#include` omissions. ## Troubleshooting - ### Conan After any updates or changes to dependencies, you may need to do the following: 1. Remove your build directory. -2. Remove the Conan cache: - ``` - rm -rf ~/.conan/data - ``` -4. Re-run [conan install](#build-and-test). - +2. Remove the Conan cache: `conan remove "*" -c` +3. Re-run [conan install](#build-and-test). ### 'protobuf/port_def.inc' file not found @@ -399,54 +394,6 @@ For example, if you want to build Debug: 1. For conan install, pass `--settings build_type=Debug` 2. For cmake, pass `-DCMAKE_BUILD_TYPE=Debug` - -### no std::result_of - -If your compiler version is recent enough to have removed `std::result_of` as -part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor -definition to your build. - -``` -conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default -conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default -``` - - -### call to 'async_teardown' is ambiguous - -If you are compiling with an early version of Clang 16, then you might hit -a [regression][6] when compiling C++20 that manifests as an [error in a Boost -header][7]. 
You can workaround it by adding this preprocessor definition: - -``` -conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default -conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default -``` - - -### recompile with -fPIC - -If you get a linker error suggesting that you recompile Boost with -position-independent code, such as: - -``` -/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/.../lib/libboost_container.a(alloc_lib.o): - requires unsupported dynamic reloc 11; recompile with -fPIC -``` - -Conan most likely downloaded a bad binary distribution of the dependency. -This seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled with GCC -for Linux. The solution is to build the dependency locally by passing -`--build boost` when calling `conan install`. - -``` -conan install --build boost ... -``` - - ## Add a Dependency If you want to experiment with a new package, follow these steps: diff --git a/conan/profiles/default b/conan/profiles/default index 0417704f8a..3a7bcda1c6 100644 --- a/conan/profiles/default +++ b/conan/profiles/default @@ -26,9 +26,6 @@ tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] {% if compiler == "apple-clang" and compiler_version >= 17 %} tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw'] {% endif %} -{% if compiler == "clang" and compiler_version == 16 %} -tools.build:cxxflags=['-DBOOST_ASIO_DISABLE_CONCEPTS'] -{% endif %} {% if compiler == "gcc" and compiler_version < 13 %} tools.build:cxxflags=['-Wno-restrict'] {% endif %} diff --git a/conanfile.py b/conanfile.py index bad4dfd111..399c9d6e1f 100644 --- a/conanfile.py +++ b/conanfile.py @@ -104,7 +104,7 @@ class Xrpl(ConanFile): def requirements(self): # Conan 2 requires transitive headers to be specified transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {} - self.requires('boost/1.83.0', force=True, **transitive_headers_opt) + self.requires('boost/1.86.0', force=True, **transitive_headers_opt) self.requires('date/3.0.4', **transitive_headers_opt) self.requires('lz4/1.10.0', force=True) self.requires('protobuf/3.21.12', force=True) diff --git a/external/soci/conanfile.py b/external/soci/conanfile.py index 7e611493d7..fe4c54e53e 100644 --- a/external/soci/conanfile.py +++ b/external/soci/conanfile.py @@ -70,7 +70,7 @@ class SociConan(ConanFile): if self.options.with_postgresql: self.requires("libpq/15.5") if self.options.with_boost: - self.requires("boost/1.83.0") + self.requires("boost/1.86.0") @property def _minimum_compilers_version(self): @@ -154,7 +154,7 @@ class SociConan(ConanFile): self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix)) self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)] if self.options.with_boost: - self.cpp_info.components["soci_core"].requires.append("boost::boost") + self.cpp_info.components["soci_core"].requires.append("boost::headers") # soci_empty if self.options.empty: diff --git a/include/xrpl/basics/Log.h b/include/xrpl/basics/Log.h index 2506b8ea8d..833907eb9c 100644 --- a/include/xrpl/basics/Log.h +++ b/include/xrpl/basics/Log.h @@ -26,6 +26,7 @@ #include #include +#include #include #include #include diff --git a/src/libxrpl/basics/FileUtilities.cpp b/src/libxrpl/basics/FileUtilities.cpp index 291eb43c7b..ffb9792614 100644 --- a/src/libxrpl/basics/FileUtilities.cpp +++ 
b/src/libxrpl/basics/FileUtilities.cpp @@ -28,6 +28,7 @@ #include #include +#include #include #include #include @@ -55,7 +56,7 @@ getFileContents( return {}; } - ifstream fileStream(fullPath, std::ios::in); + std::ifstream fileStream(fullPath.string(), std::ios::in); if (!fileStream) { @@ -85,7 +86,8 @@ writeFileContents( using namespace boost::filesystem; using namespace boost::system::errc; - ofstream fileStream(destPath, std::ios::out | std::ios::trunc); + std::ofstream fileStream( + destPath.string(), std::ios::out | std::ios::trunc); if (!fileStream) { diff --git a/src/test/jtx/TrustedPublisherServer.h b/src/test/jtx/TrustedPublisherServer.h index 54538032f5..7bc092cbe3 100644 --- a/src/test/jtx/TrustedPublisherServer.h +++ b/src/test/jtx/TrustedPublisherServer.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -220,9 +221,8 @@ public: getList_ = [blob = blob, sig, manifest, version](int interval) { // Build the contents of a version 1 format UNL file std::stringstream l; - l << "{\"blob\":\"" << blob << "\"" - << ",\"signature\":\"" << sig << "\"" - << ",\"manifest\":\"" << manifest << "\"" + l << "{\"blob\":\"" << blob << "\"" << ",\"signature\":\"" << sig + << "\"" << ",\"manifest\":\"" << manifest << "\"" << ",\"refresh_interval\": " << interval << ",\"version\":" << version << '}'; return l.str(); @@ -257,15 +257,14 @@ public: std::stringstream l; for (auto const& info : blobInfo) { - l << "{\"blob\":\"" << info.blob << "\"" - << ",\"signature\":\"" << info.signature << "\"},"; + l << "{\"blob\":\"" << info.blob << "\"" << ",\"signature\":\"" + << info.signature << "\"},"; } std::string blobs = l.str(); blobs.pop_back(); l.str(std::string()); l << "{\"blobs_v2\": [ " << blobs << "],\"manifest\":\"" << manifest - << "\"" - << ",\"refresh_interval\": " << interval + << "\"" << ",\"refresh_interval\": " << interval << ",\"version\":" << (version + 1) << '}'; return l.str(); }; diff --git a/src/test/server/ServerStatus_test.cpp b/src/test/server/ServerStatus_test.cpp index bcd355e301..b27dee6e0a 100644 --- a/src/test/server/ServerStatus_test.cpp +++ b/src/test/server/ServerStatus_test.cpp @@ -681,7 +681,7 @@ class ServerStatus_test : public beast::unit_test::suite, resp["Upgrade"] == "websocket"); BEAST_EXPECT( resp.find("Connection") != resp.end() && - resp["Connection"] == "upgrade"); + resp["Connection"] == "Upgrade"); } void diff --git a/src/test/unit_test/FileDirGuard.h b/src/test/unit_test/FileDirGuard.h index d247ae3015..091bc80d20 100644 --- a/src/test/unit_test/FileDirGuard.h +++ b/src/test/unit_test/FileDirGuard.h @@ -26,6 +26,8 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. #include +#include + namespace ripple { namespace test { namespace detail { diff --git a/src/xrpld/app/main/Application.cpp b/src/xrpld/app/main/Application.cpp index ea0b794116..c824eccfba 100644 --- a/src/xrpld/app/main/Application.cpp +++ b/src/xrpld/app/main/Application.cpp @@ -79,7 +79,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/xrpld/app/main/Main.cpp b/src/xrpld/app/main/Main.cpp index e926a38563..19c8c9910d 100644 --- a/src/xrpld/app/main/Main.cpp +++ b/src/xrpld/app/main/Main.cpp @@ -39,7 +39,7 @@ #include #include -#include +#include #include #include
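
For reference, a minimal illustrative sketch (not part of the patch; the helper names `readAll` and `writeAll` are invented for this example) of the file-stream pattern the `FileUtilities.cpp` hunk above switches to: standard-library streams constructed from a `boost::filesystem::path` via `.string()`, rather than the unqualified `ifstream`/`ofstream` aliases previously pulled in from `boost::filesystem`.

```
// Illustrative only -- not part of the patch. readAll/writeAll are
// hypothetical helpers showing the same pattern as the FileUtilities.cpp
// change: open std::ifstream/std::ofstream from a boost::filesystem::path
// by converting it with .string().
#include <boost/filesystem.hpp>

#include <fstream>
#include <iterator>
#include <string>

std::string
readAll(boost::filesystem::path const& fullPath)
{
    // std::ifstream accepts a std::string path (C++11), so no dependency on
    // boost::filesystem's fstream typedefs is needed.
    std::ifstream in(fullPath.string(), std::ios::in);
    return {std::istreambuf_iterator<char>(in),
            std::istreambuf_iterator<char>()};
}

void
writeAll(boost::filesystem::path const& destPath, std::string const& data)
{
    std::ofstream out(destPath.string(), std::ios::out | std::ios::trunc);
    out << data;
}
```

Presumably this keeps call sites stable across the Boost 1.86 upgrade without relying on `boost::filesystem` stream typedefs.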