Merge remote-tracking branch 'upstream/develop' into develop4.6

Mayukha Vadari
2025-07-25 13:58:10 -04:00
27 changed files with 392 additions and 747 deletions

View File

@@ -10,7 +10,6 @@ runs:
shell: bash
run: |
conan export --version 1.1.10 external/snappy
conan export --version 9.7.3 external/rocksdb
conan export --version 4.0.3 external/soci
conan export --version 2.3.1 external/wamr
- name: add Ripple Conan remote
@@ -23,7 +22,6 @@ runs:
fi
conan remote add --index 0 ripple "${CONAN_URL}"
echo "Added conan remote ripple at ${CONAN_URL}"
- name: try to authenticate to Ripple Conan remote
if: env.CONAN_LOGIN_USERNAME_RIPPLE != '' && env.CONAN_PASSWORD_RIPPLE != ''
id: remote
@@ -32,7 +30,6 @@ runs:
echo "Authenticating to ripple remote..."
conan remote auth ripple --force
conan remote list-users
- name: list missing binaries
id: binaries
shell: bash

View File

@@ -24,8 +24,6 @@ env:
CONAN_GLOBAL_CONF: |
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
core:default_build_profile=libxrpl
core:default_profile=libxrpl
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose
@@ -95,7 +93,6 @@ jobs:
shell: bash
run: |
conan export --version 1.1.10 external/snappy
conan export --version 9.7.3 external/rocksdb
conan export --version 4.0.3 external/soci
conan export --version 2.3.1 external/wamr
- name: add Ripple Conan remote

View File

@@ -25,8 +25,6 @@ env:
CONAN_GLOBAL_CONF: |
core.download:parallel={{ os.cpu_count() }}
core.upload:parallel={{ os.cpu_count() }}
core:default_build_profile=libxrpl
core:default_profile=libxrpl
tools.build:jobs={{ (os.cpu_count() * 4/5) | int }}
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose
@@ -101,7 +99,6 @@ jobs:
run: tar -czf conan.tar.gz -C ${CONAN_HOME} .
- name: build dependencies
uses: ./.github/actions/dependencies
with:
configuration: ${{ matrix.configuration }}
- name: upload archive
@@ -359,40 +356,44 @@ jobs:
cmake --build .
./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'
# NOTE we are not using the dependencies built above because they lag
# behind in compiler versions. Instrumentation requires clang version 16
# or later
instrumentation-build:
if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
env:
CLANG_RELEASE: 16
needs: dependencies
runs-on: [self-hosted, heavy]
container: ghcr.io/xrplf/ci/debian-bookworm:clang-16
env:
build_dir: .build
steps:
- name: download cache
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093
with:
name: linux-clang-Debug
- name: extract cache
run: |
mkdir -p ${CONAN_HOME}
tar -xzf conan.tar.gz -C ${CONAN_HOME}
- name: check environment
run: |
echo ${PATH} | tr ':' '\n'
conan --version
cmake --version
env | sort
ls ${CONAN_HOME}
- name: checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- name: dependencies
uses: ./.github/actions/dependencies
with:
configuration: Debug
- name: prepare environment
run: |
mkdir ${GITHUB_WORKSPACE}/.build
echo "SOURCE_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV
echo "BUILD_DIR=$GITHUB_WORKSPACE/.build" >> $GITHUB_ENV
- name: configure Conan
run: |
echo "${CONAN_GLOBAL_CONF}" >> $(conan config home)/global.conf
conan config install conan/profiles/ -tf $(conan config home)/profiles/
conan export --version 2.3.1 external/wamr
conan profile show
- name: build dependencies
run: |
cd ${BUILD_DIR}
conan install ${SOURCE_DIR} \
--output-folder ${BUILD_DIR} \
--build missing \
--settings:all build_type=Debug
mkdir -p ${build_dir}
echo "SOURCE_DIR=$(pwd)" >> $GITHUB_ENV
echo "BUILD_DIR=$(pwd)/${build_dir}" >> $GITHUB_ENV
- name: build with instrumentation
run: |

View File

@@ -27,8 +27,6 @@ env:
CONAN_GLOBAL_CONF: |
core.download:parallel={{os.cpu_count()}}
core.upload:parallel={{os.cpu_count()}}
core:default_build_profile=libxrpl
core:default_profile=libxrpl
tools.build:jobs=24
tools.build:verbosity=verbose
tools.compilation:verbosity=verbose
@@ -90,7 +88,6 @@ jobs:
shell: bash
run: |
conan export --version 1.1.10 external/snappy
conan export --version 9.7.3 external/rocksdb
conan export --version 4.0.3 external/soci
conan export --version 2.3.1 external/wamr
- name: add Ripple Conan remote

View File

@@ -172,14 +172,6 @@ which allows you to statically link it with GCC, if you want.
conan export --version 1.1.10 external/snappy
```
Export our [Conan recipe for RocksDB](./external/rocksdb).
It does not override paths to dependencies when building with Visual Studio.
```
# Conan 2.x
conan export --version 9.7.3 external/rocksdb
```
Export our [Conan recipe for SOCI](./external/soci).
It patches their CMake to correctly import its dependencies.
@@ -390,10 +382,7 @@ and can be helpful for detecting `#include` omissions.
After any updates or changes to dependencies, you may need to do the following:
1. Remove your build directory.
2. Remove the Conan cache:
```
rm -rf ~/.conan/data
```
2. Remove the Conan cache: `conan remove "*" -c`
3. Re-run [conan install](#build-and-test).
### 'protobuf/port_def.inc' file not found
@@ -412,50 +401,6 @@ For example, if you want to build Debug:
1. For conan install, pass `--settings build_type=Debug`
2. For cmake, pass `-DCMAKE_BUILD_TYPE=Debug`
### no std::result_of
If your compiler version is recent enough to have removed `std::result_of` as
part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor
definition to your build.
```
conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
```
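For context, a hedged sketch of what this macro changes: C++20 removed `std::result_of`, and `BOOST_ASIO_HAS_STD_INVOKE_RESULT` switches Boost.Asio to the standard replacement, `std::invoke_result`. The `add` function and the `R` alias below are illustrative, not from the codebase.
```
#include <type_traits>

int add(int a, int b) { return a + b; }

// Removed in C++20 (what older Boost.Asio relied on):
//   using R = typename std::result_of<decltype(&add)(int, int)>::type;
// The replacement selected by the macro takes the callable type and the
// argument types as separate template parameters:
using R = std::invoke_result_t<decltype(&add), int, int>;
static_assert(std::is_same_v<R, int>);
```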
### call to 'async_teardown' is ambiguous
If you are compiling with an early version of Clang 16, then you might hit
a [regression][6] when compiling C++20 that manifests as an [error in a Boost
header][7]. You can workaround it by adding this preprocessor definition:
```
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default
```
### recompile with -fPIC
If you get a linker error suggesting that you recompile Boost with
position-independent code, such as:
```
/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/.../lib/libboost_container.a(alloc_lib.o):
requires unsupported dynamic reloc 11; recompile with -fPIC
```
Conan most likely downloaded a bad binary distribution of the dependency.
This seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled with GCC
for Linux. The solution is to build the dependency locally by passing
`--build boost` when calling `conan install`.
```
conan install --build boost ...
```
## Add a Dependency
If you want to experiment with a new package, follow these steps:

View File

@@ -9,6 +9,7 @@
[settings]
os={{ os }}
arch={{ arch }}
build_type=Debug
compiler={{compiler}}
compiler.version={{ compiler_version }}
compiler.cppstd=20
@@ -17,3 +18,17 @@ compiler.runtime=static
{% else %}
compiler.libcxx={{detect_api.detect_libcxx(compiler, version, compiler_exe)}}
{% endif %}
[conf]
{% if compiler == "clang" and compiler_version >= 19 %}
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "apple-clang" and compiler_version >= 17 %}
tools.build:cxxflags=['-Wno-missing-template-arg-list-after-template-kw']
{% endif %}
{% if compiler == "gcc" and compiler_version < 13 %}
tools.build:cxxflags=['-Wno-restrict']
{% endif %}
[tool_requires]
!cmake/*: cmake/[>=3 <4]
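For context on the `[conf]` additions above, a minimal sketch (an assumption about the diagnostic, not code from this repository) of the construct that Clang 19+ and Apple Clang 17+ flag with `-Wmissing-template-arg-list-after-template-kw`; the profile suppresses it because some dependencies still contain such code:
```
// Since Clang 19, a name prefixed by the 'template' keyword but not followed
// by an explicit template argument list draws this warning. S and call() are
// hypothetical.
struct S
{
    template <typename T = int>
    int get() { return 42; }
};

template <typename T>
int call(T t)
{
    return t.template get();  // warns on Clang 19+; write get<>() to silence
}

int main() { return call(S{}) == 42 ? 0 : 1; }
```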

View File

@@ -106,7 +106,7 @@ class Xrpl(ConanFile):
def requirements(self):
# Conan 2 requires transitive headers to be specified
transitive_headers_opt = {'transitive_headers': True} if conan_version.split('.')[0] == '2' else {}
self.requires('boost/1.83.0', force=True, **transitive_headers_opt)
self.requires('boost/1.86.0', force=True, **transitive_headers_opt)
self.requires('date/3.0.4', **transitive_headers_opt)
self.requires('lz4/1.10.0', force=True)
self.requires('protobuf/3.21.12', force=True)
@@ -114,7 +114,7 @@ class Xrpl(ConanFile):
if self.options.jemalloc:
self.requires('jemalloc/5.3.0')
if self.options.rocksdb:
self.requires('rocksdb/9.7.3')
self.requires('rocksdb/10.0.1')
self.requires('xxhash/0.8.3', **transitive_headers_opt)
exports_sources = (
@@ -146,8 +146,6 @@ class Xrpl(ConanFile):
tc.variables['static'] = self.options.static
tc.variables['unity'] = self.options.unity
tc.variables['xrpld'] = self.options.xrpld
if self.settings.compiler == 'clang' and self.settings.compiler.version == 16:
tc.extra_cxxflags = ["-DBOOST_ASIO_DISABLE_CONCEPTS"]
tc.generate()
def build(self):

View File

@@ -1,12 +0,0 @@
sources:
"9.7.3":
url: "https://github.com/facebook/rocksdb/archive/refs/tags/v9.7.3.tar.gz"
sha256: "acfabb989cbfb5b5c4d23214819b059638193ec33dad2d88373c46448d16d38b"
patches:
"9.7.3":
- patch_file: "patches/9.x.x-0001-exclude-thirdparty.patch"
patch_description: "Do not include thirdparty.inc"
patch_type: "portability"
- patch_file: "patches/9.7.3-0001-memory-leak.patch"
patch_description: "Fix a leak of obsolete blob files left open until DB::Close()"
patch_type: "portability"

View File

@@ -1,235 +0,0 @@
import os
import glob
import shutil
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, collect_libs, copy, export_conandata_patches, get, rm, rmdir
from conan.tools.microsoft import check_min_vs, is_msvc, is_msvc_static_runtime
from conan.tools.scm import Version
required_conan_version = ">=1.53.0"
class RocksDBConan(ConanFile):
name = "rocksdb"
description = "A library that provides an embeddable, persistent key-value store for fast storage"
license = ("GPL-2.0-only", "Apache-2.0")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/facebook/rocksdb"
topics = ("database", "leveldb", "facebook", "key-value")
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"lite": [True, False],
"with_gflags": [True, False],
"with_snappy": [True, False],
"with_lz4": [True, False],
"with_zlib": [True, False],
"with_zstd": [True, False],
"with_tbb": [True, False],
"with_jemalloc": [True, False],
"enable_sse": [False, "sse42", "avx2"],
"use_rtti": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"lite": False,
"with_snappy": False,
"with_lz4": False,
"with_zlib": False,
"with_zstd": False,
"with_gflags": False,
"with_tbb": False,
"with_jemalloc": False,
"enable_sse": False,
"use_rtti": False,
}
@property
def _min_cppstd(self):
return "11" if Version(self.version) < "8.8.1" else "17"
@property
def _compilers_minimum_version(self):
return {} if self._min_cppstd == "11" else {
"apple-clang": "10",
"clang": "7",
"gcc": "7",
"msvc": "191",
"Visual Studio": "15",
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.arch != "x86_64":
del self.options.with_tbb
if self.settings.build_type == "Debug":
self.options.use_rtti = True # RTTI is used in asserts in debug mode...
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
if self.options.with_gflags:
self.requires("gflags/2.2.2")
if self.options.with_snappy:
self.requires("snappy/1.1.10")
if self.options.with_lz4:
self.requires("lz4/1.10.0")
if self.options.with_zlib:
self.requires("zlib/[>=1.2.11 <2]")
if self.options.with_zstd:
self.requires("zstd/1.5.6")
if self.options.get_safe("with_tbb"):
self.requires("onetbb/2021.12.0")
if self.options.with_jemalloc:
self.requires("jemalloc/5.3.0")
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, self._min_cppstd)
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version and Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
)
if self.settings.arch not in ["x86_64", "ppc64le", "ppc64", "mips64", "armv8"]:
raise ConanInvalidConfiguration("Rocksdb requires 64 bits")
check_min_vs(self, "191")
if self.version == "6.20.3" and \
self.settings.os == "Linux" and \
self.settings.compiler == "gcc" and \
Version(self.settings.compiler.version) < "5":
raise ConanInvalidConfiguration("Rocksdb 6.20.3 is not compilable with gcc <5.") # See https://github.com/facebook/rocksdb/issues/3522
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["FAIL_ON_WARNINGS"] = False
tc.variables["WITH_TESTS"] = False
tc.variables["WITH_TOOLS"] = False
tc.variables["WITH_CORE_TOOLS"] = False
tc.variables["WITH_BENCHMARK_TOOLS"] = False
tc.variables["WITH_FOLLY_DISTRIBUTED_MUTEX"] = False
if is_msvc(self):
tc.variables["WITH_MD_LIBRARY"] = not is_msvc_static_runtime(self)
tc.variables["ROCKSDB_INSTALL_ON_WINDOWS"] = self.settings.os == "Windows"
tc.variables["ROCKSDB_LITE"] = self.options.lite
tc.variables["WITH_GFLAGS"] = self.options.with_gflags
tc.variables["WITH_SNAPPY"] = self.options.with_snappy
tc.variables["WITH_LZ4"] = self.options.with_lz4
tc.variables["WITH_ZLIB"] = self.options.with_zlib
tc.variables["WITH_ZSTD"] = self.options.with_zstd
tc.variables["WITH_TBB"] = self.options.get_safe("with_tbb", False)
tc.variables["WITH_JEMALLOC"] = self.options.with_jemalloc
tc.variables["ROCKSDB_BUILD_SHARED"] = self.options.shared
tc.variables["ROCKSDB_LIBRARY_EXPORTS"] = self.settings.os == "Windows" and self.options.shared
tc.variables["ROCKSDB_DLL" ] = self.settings.os == "Windows" and self.options.shared
tc.variables["USE_RTTI"] = self.options.use_rtti
if not bool(self.options.enable_sse):
tc.variables["PORTABLE"] = True
tc.variables["FORCE_SSE42"] = False
elif self.options.enable_sse == "sse42":
tc.variables["PORTABLE"] = True
tc.variables["FORCE_SSE42"] = True
elif self.options.enable_sse == "avx2":
tc.variables["PORTABLE"] = False
tc.variables["FORCE_SSE42"] = False
# not available yet in CCI
tc.variables["WITH_NUMA"] = False
tc.generate()
deps = CMakeDeps(self)
if self.options.with_jemalloc:
deps.set_property("jemalloc", "cmake_file_name", "JeMalloc")
deps.set_property("jemalloc", "cmake_target_name", "JeMalloc::JeMalloc")
if self.options.with_zstd:
deps.set_property("zstd", "cmake_target_name", "zstd::zstd")
deps.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def _remove_static_libraries(self):
rm(self, "rocksdb.lib", os.path.join(self.package_folder, "lib"))
for lib in glob.glob(os.path.join(self.package_folder, "lib", "*.a")):
if not lib.endswith(".dll.a"):
os.remove(lib)
def _remove_cpp_headers(self):
for path in glob.glob(os.path.join(self.package_folder, "include", "rocksdb", "*")):
if path != os.path.join(self.package_folder, "include", "rocksdb", "c.h"):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
def package(self):
copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
copy(self, "LICENSE*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
if self.options.shared:
self._remove_static_libraries()
self._remove_cpp_headers() # Force stable ABI for shared libraries
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
cmake_target = "rocksdb-shared" if self.options.shared else "rocksdb"
self.cpp_info.set_property("cmake_file_name", "RocksDB")
self.cpp_info.set_property("cmake_target_name", f"RocksDB::{cmake_target}")
# TODO: back to global scope in conan v2 once cmake_find_package* generators removed
self.cpp_info.components["librocksdb"].libs = collect_libs(self)
if self.settings.os == "Windows":
self.cpp_info.components["librocksdb"].system_libs = ["shlwapi", "rpcrt4"]
if self.options.shared:
self.cpp_info.components["librocksdb"].defines = ["ROCKSDB_DLL"]
elif self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["librocksdb"].system_libs = ["pthread", "m"]
if self.options.lite:
self.cpp_info.components["librocksdb"].defines.append("ROCKSDB_LITE")
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.names["cmake_find_package"] = "RocksDB"
self.cpp_info.names["cmake_find_package_multi"] = "RocksDB"
self.cpp_info.components["librocksdb"].names["cmake_find_package"] = cmake_target
self.cpp_info.components["librocksdb"].names["cmake_find_package_multi"] = cmake_target
self.cpp_info.components["librocksdb"].set_property("cmake_target_name", f"RocksDB::{cmake_target}")
if self.options.with_gflags:
self.cpp_info.components["librocksdb"].requires.append("gflags::gflags")
if self.options.with_snappy:
self.cpp_info.components["librocksdb"].requires.append("snappy::snappy")
if self.options.with_lz4:
self.cpp_info.components["librocksdb"].requires.append("lz4::lz4")
if self.options.with_zlib:
self.cpp_info.components["librocksdb"].requires.append("zlib::zlib")
if self.options.with_zstd:
self.cpp_info.components["librocksdb"].requires.append("zstd::zstd")
if self.options.get_safe("with_tbb"):
self.cpp_info.components["librocksdb"].requires.append("onetbb::onetbb")
if self.options.with_jemalloc:
self.cpp_info.components["librocksdb"].requires.append("jemalloc::jemalloc")

View File

@@ -1,319 +0,0 @@
diff --git a/HISTORY.md b/HISTORY.md
index 36d472229..05ad1a202 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,6 +1,10 @@
# Rocksdb Change Log
> NOTE: Entries for next release do not go here. Follow instructions in `unreleased_history/README.txt`
+## 9.7.4 (10/31/2024)
+### Bug Fixes
+* Fix a leak of obsolete blob files left open until DB::Close(). This bug was introduced in version 9.4.0.
+
## 9.7.3 (10/16/2024)
### Behavior Changes
* OPTIONS file to be loaded by remote worker is now preserved so that it does not get purged by the primary host. A similar technique as how we are preserving new SST files from getting purged is used for this. min_options_file_numbers_ is tracked like pending_outputs_ is tracked.
diff --git a/db/blob/blob_file_cache.cc b/db/blob/blob_file_cache.cc
index 5f340aadf..1b9faa238 100644
--- a/db/blob/blob_file_cache.cc
+++ b/db/blob/blob_file_cache.cc
@@ -42,6 +42,7 @@ Status BlobFileCache::GetBlobFileReader(
assert(blob_file_reader);
assert(blob_file_reader->IsEmpty());
+ // NOTE: sharing same Cache with table_cache
const Slice key = GetSliceForKey(&blob_file_number);
assert(cache_);
@@ -98,4 +99,13 @@ Status BlobFileCache::GetBlobFileReader(
return Status::OK();
}
+void BlobFileCache::Evict(uint64_t blob_file_number) {
+ // NOTE: sharing same Cache with table_cache
+ const Slice key = GetSliceForKey(&blob_file_number);
+
+ assert(cache_);
+
+ cache_.get()->Erase(key);
+}
+
} // namespace ROCKSDB_NAMESPACE
diff --git a/db/blob/blob_file_cache.h b/db/blob/blob_file_cache.h
index 740e67ada..6858d012b 100644
--- a/db/blob/blob_file_cache.h
+++ b/db/blob/blob_file_cache.h
@@ -36,6 +36,15 @@ class BlobFileCache {
uint64_t blob_file_number,
CacheHandleGuard<BlobFileReader>* blob_file_reader);
+ // Called when a blob file is obsolete to ensure it is removed from the cache
+ // to avoid effectively leaking the open file and associated memory
+ void Evict(uint64_t blob_file_number);
+
+ // Used to identify cache entries for blob files (not normally useful)
+ static const Cache::CacheItemHelper* GetHelper() {
+ return CacheInterface::GetBasicHelper();
+ }
+
private:
using CacheInterface =
BasicTypedCacheInterface<BlobFileReader, CacheEntryRole::kMisc>;
diff --git a/db/column_family.h b/db/column_family.h
index e4b7adde8..86637736a 100644
--- a/db/column_family.h
+++ b/db/column_family.h
@@ -401,6 +401,7 @@ class ColumnFamilyData {
SequenceNumber earliest_seq);
TableCache* table_cache() const { return table_cache_.get(); }
+ BlobFileCache* blob_file_cache() const { return blob_file_cache_.get(); }
BlobSource* blob_source() const { return blob_source_.get(); }
// See documentation in compaction_picker.h
diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc
index 261593423..06573ac2e 100644
--- a/db/db_impl/db_impl.cc
+++ b/db/db_impl/db_impl.cc
@@ -659,8 +659,9 @@ Status DBImpl::CloseHelper() {
// We need to release them before the block cache is destroyed. The block
// cache may be destroyed inside versions_.reset(), when column family data
// list is destroyed, so leaving handles in table cache after
- // versions_.reset() may cause issues.
- // Here we clean all unreferenced handles in table cache.
+ // versions_.reset() may cause issues. Here we clean all unreferenced handles
+ // in table cache, and (for certain builds/conditions) assert that no obsolete
+ // files are hanging around unreferenced (leak) in the table/blob file cache.
// Now we assume all user queries have finished, so only version set itself
// can possibly hold the blocks from block cache. After releasing unreferenced
// handles here, only handles held by version set left and inside
@@ -668,6 +669,9 @@ Status DBImpl::CloseHelper() {
// time a handle is released, we erase it from the cache too. By doing that,
// we can guarantee that after versions_.reset(), table cache is empty
// so the cache can be safely destroyed.
+#ifndef NDEBUG
+ TEST_VerifyNoObsoleteFilesCached(/*db_mutex_already_held=*/true);
+#endif // !NDEBUG
table_cache_->EraseUnRefEntries();
for (auto& txn_entry : recovered_transactions_) {
@@ -3227,6 +3231,8 @@ Status DBImpl::MultiGetImpl(
s = Status::Aborted();
break;
}
+ // This could be a long-running operation
+ ROCKSDB_THREAD_YIELD_HOOK();
}
// Post processing (decrement reference counts and record statistics)
diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h
index 5e4fa310b..ccc0abfa7 100644
--- a/db/db_impl/db_impl.h
+++ b/db/db_impl/db_impl.h
@@ -1241,9 +1241,14 @@ class DBImpl : public DB {
static Status TEST_ValidateOptions(const DBOptions& db_options) {
return ValidateOptions(db_options);
}
-
#endif // NDEBUG
+ // In certain configurations, verify that the table/blob file cache only
+ // contains entries for live files, to check for effective leaks of open
+ // files. This can only be called when purging of obsolete files has
+ // "settled," such as during parts of DB Close().
+ void TEST_VerifyNoObsoleteFilesCached(bool db_mutex_already_held) const;
+
// persist stats to column family "_persistent_stats"
void PersistStats();
diff --git a/db/db_impl/db_impl_debug.cc b/db/db_impl/db_impl_debug.cc
index 790a50d7a..67f5b4aaf 100644
--- a/db/db_impl/db_impl_debug.cc
+++ b/db/db_impl/db_impl_debug.cc
@@ -9,6 +9,7 @@
#ifndef NDEBUG
+#include "db/blob/blob_file_cache.h"
#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "db/error_handler.h"
@@ -328,5 +329,49 @@ size_t DBImpl::TEST_EstimateInMemoryStatsHistorySize() const {
InstrumentedMutexLock l(&const_cast<DBImpl*>(this)->stats_history_mutex_);
return EstimateInMemoryStatsHistorySize();
}
+
+void DBImpl::TEST_VerifyNoObsoleteFilesCached(
+ bool db_mutex_already_held) const {
+ // This check is somewhat expensive and obscure to make a part of every
+ // unit test in every build variety. Thus, we only enable it for ASAN builds.
+ if (!kMustFreeHeapAllocations) {
+ return;
+ }
+
+ std::optional<InstrumentedMutexLock> l;
+ if (db_mutex_already_held) {
+ mutex_.AssertHeld();
+ } else {
+ l.emplace(&mutex_);
+ }
+
+ std::vector<uint64_t> live_files;
+ for (auto cfd : *versions_->GetColumnFamilySet()) {
+ if (cfd->IsDropped()) {
+ continue;
+ }
+ // Sneakily add both SST and blob files to the same list
+ cfd->current()->AddLiveFiles(&live_files, &live_files);
+ }
+ std::sort(live_files.begin(), live_files.end());
+
+ auto fn = [&live_files](const Slice& key, Cache::ObjectPtr, size_t,
+ const Cache::CacheItemHelper* helper) {
+ if (helper != BlobFileCache::GetHelper()) {
+ // Skip non-blob files for now
+ // FIXME: diagnose and fix the leaks of obsolete SST files revealed in
+ // unit tests.
+ return;
+ }
+ // See TableCache and BlobFileCache
+ assert(key.size() == sizeof(uint64_t));
+ uint64_t file_number;
+ GetUnaligned(reinterpret_cast<const uint64_t*>(key.data()), &file_number);
+ // Assert file is in sorted live_files
+ assert(
+ std::binary_search(live_files.begin(), live_files.end(), file_number));
+ };
+ table_cache_->ApplyToAllEntries(fn, {});
+}
} // namespace ROCKSDB_NAMESPACE
#endif // NDEBUG
diff --git a/db/db_iter.cc b/db/db_iter.cc
index e02586377..bf4749eb9 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -540,6 +540,8 @@ bool DBIter::FindNextUserEntryInternal(bool skipping_saved_key,
} else {
iter_.Next();
}
+ // This could be a long-running operation due to tombstones, etc.
+ ROCKSDB_THREAD_YIELD_HOOK();
} while (iter_.Valid());
valid_ = false;
diff --git a/db/table_cache.cc b/db/table_cache.cc
index 71fc29c32..8a5be75e8 100644
--- a/db/table_cache.cc
+++ b/db/table_cache.cc
@@ -164,6 +164,7 @@ Status TableCache::GetTableReader(
}
Cache::Handle* TableCache::Lookup(Cache* cache, uint64_t file_number) {
+ // NOTE: sharing same Cache with BlobFileCache
Slice key = GetSliceForFileNumber(&file_number);
return cache->Lookup(key);
}
@@ -179,6 +180,7 @@ Status TableCache::FindTable(
size_t max_file_size_for_l0_meta_pin, Temperature file_temperature) {
PERF_TIMER_GUARD_WITH_CLOCK(find_table_nanos, ioptions_.clock);
uint64_t number = file_meta.fd.GetNumber();
+ // NOTE: sharing same Cache with BlobFileCache
Slice key = GetSliceForFileNumber(&number);
*handle = cache_.Lookup(key);
TEST_SYNC_POINT_CALLBACK("TableCache::FindTable:0",
diff --git a/db/version_builder.cc b/db/version_builder.cc
index ed8ab8214..c98f53f42 100644
--- a/db/version_builder.cc
+++ b/db/version_builder.cc
@@ -24,6 +24,7 @@
#include <vector>
#include "cache/cache_reservation_manager.h"
+#include "db/blob/blob_file_cache.h"
#include "db/blob/blob_file_meta.h"
#include "db/dbformat.h"
#include "db/internal_stats.h"
@@ -744,12 +745,9 @@ class VersionBuilder::Rep {
return Status::Corruption("VersionBuilder", oss.str());
}
- // Note: we use C++11 for now but in C++14, this could be done in a more
- // elegant way using generalized lambda capture.
- VersionSet* const vs = version_set_;
- const ImmutableCFOptions* const ioptions = ioptions_;
-
- auto deleter = [vs, ioptions](SharedBlobFileMetaData* shared_meta) {
+ auto deleter = [vs = version_set_, ioptions = ioptions_,
+ bc = cfd_ ? cfd_->blob_file_cache()
+ : nullptr](SharedBlobFileMetaData* shared_meta) {
if (vs) {
assert(ioptions);
assert(!ioptions->cf_paths.empty());
@@ -758,6 +756,9 @@ class VersionBuilder::Rep {
vs->AddObsoleteBlobFile(shared_meta->GetBlobFileNumber(),
ioptions->cf_paths.front().path);
}
+ if (bc) {
+ bc->Evict(shared_meta->GetBlobFileNumber());
+ }
delete shared_meta;
};
@@ -766,7 +767,7 @@ class VersionBuilder::Rep {
blob_file_number, blob_file_addition.GetTotalBlobCount(),
blob_file_addition.GetTotalBlobBytes(),
blob_file_addition.GetChecksumMethod(),
- blob_file_addition.GetChecksumValue(), deleter);
+ blob_file_addition.GetChecksumValue(), std::move(deleter));
mutable_blob_file_metas_.emplace(
blob_file_number, MutableBlobFileMetaData(std::move(shared_meta)));
diff --git a/db/version_set.h b/db/version_set.h
index 9336782b1..024f869e7 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -1514,7 +1514,6 @@ class VersionSet {
void GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata);
void AddObsoleteBlobFile(uint64_t blob_file_number, std::string path) {
- // TODO: Erase file from BlobFileCache?
obsolete_blob_files_.emplace_back(blob_file_number, std::move(path));
}
diff --git a/include/rocksdb/version.h b/include/rocksdb/version.h
index 2a19796b8..0afa2cab1 100644
--- a/include/rocksdb/version.h
+++ b/include/rocksdb/version.h
@@ -13,7 +13,7 @@
// minor or major version number planned for release.
#define ROCKSDB_MAJOR 9
#define ROCKSDB_MINOR 7
-#define ROCKSDB_PATCH 3
+#define ROCKSDB_PATCH 4
// Do not use these. We made the mistake of declaring macros starting with
// double underscore. Now we have to live with our choice. We'll deprecate these
diff --git a/port/port.h b/port/port.h
index 13aa56d47..141716e5b 100644
--- a/port/port.h
+++ b/port/port.h
@@ -19,3 +19,19 @@
#elif defined(OS_WIN)
#include "port/win/port_win.h"
#endif
+
+#ifdef OS_LINUX
+// A temporary hook into long-running RocksDB threads to support modifying their
+// priority etc. This should become a public API hook once the requirements
+// are better understood.
+extern "C" void RocksDbThreadYield() __attribute__((__weak__));
+#define ROCKSDB_THREAD_YIELD_HOOK() \
+ { \
+ if (RocksDbThreadYield) { \
+ RocksDbThreadYield(); \
+ } \
+ }
+#else
+#define ROCKSDB_THREAD_YIELD_HOOK() \
+ {}
+#endif
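The `port.h` hunk above declares `RocksDbThreadYield` as a weak C symbol and, on Linux, calls it from long-running loops via `ROCKSDB_THREAD_YIELD_HOOK()`. A minimal sketch of how an embedding application could supply a strong definition to override the weak one; the yield policy in the body is an assumption, not part of the patch:
```
#include <thread>

// A strong definition in the embedder overrides the weak declaration, so the
// ROCKSDB_THREAD_YIELD_HOOK() call sites (MultiGet, iterator scans) will
// invoke it.
extern "C" void RocksDbThreadYield()
{
    std::this_thread::yield();  // cooperatively yield to same-priority threads
}
```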

View File

@@ -1,30 +0,0 @@
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 93b884d..b715cb6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -106,14 +106,9 @@ endif()
include(CMakeDependentOption)
if(MSVC)
- option(WITH_GFLAGS "build with GFlags" OFF)
option(WITH_XPRESS "build with windows built in compression" OFF)
- option(ROCKSDB_SKIP_THIRDPARTY "skip thirdparty.inc" OFF)
-
- if(NOT ROCKSDB_SKIP_THIRDPARTY)
- include(${CMAKE_CURRENT_SOURCE_DIR}/thirdparty.inc)
- endif()
-else()
+endif()
+if(TRUE)
if(CMAKE_SYSTEM_NAME MATCHES "FreeBSD" AND NOT CMAKE_SYSTEM_NAME MATCHES "kFreeBSD")
# FreeBSD has jemalloc as default malloc
# but it does not have all the jemalloc files in include/...
@@ -126,7 +121,7 @@ else()
endif()
endif()
- if(MINGW)
+ if(MSVC OR MINGW)
option(WITH_GFLAGS "build with GFlags" OFF)
else()
option(WITH_GFLAGS "build with GFlags" ON)

View File

@@ -70,7 +70,7 @@ class SociConan(ConanFile):
if self.options.with_postgresql:
self.requires("libpq/15.5")
if self.options.with_boost:
self.requires("boost/1.83.0")
self.requires("boost/1.86.0")
@property
def _minimum_compilers_version(self):
@@ -154,7 +154,7 @@ class SociConan(ConanFile):
self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix))
self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)]
if self.options.with_boost:
self.cpp_info.components["soci_core"].requires.append("boost::boost")
self.cpp_info.components["soci_core"].requires.append("boost::headers")
# soci_empty
if self.options.empty:

View File

@@ -26,6 +26,7 @@
#include <boost/beast/core/string.hpp>
#include <boost/filesystem.hpp>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>

View File

@@ -512,6 +512,7 @@ TRANSACTION(ttVAULT_WITHDRAW, 69, VaultWithdraw, Delegation::delegatable, ({
{sfVaultID, soeREQUIRED},
{sfAmount, soeREQUIRED, soeMPTSupported},
{sfDestination, soeOPTIONAL},
{sfDestinationTag, soeOPTIONAL},
}))
/** This transaction claws back tokens from a vault. */

View File

@@ -28,6 +28,7 @@
#include <cerrno>
#include <cstddef>
#include <fstream>
#include <ios>
#include <iterator>
#include <optional>
@@ -55,7 +56,7 @@ getFileContents(
return {};
}
ifstream fileStream(fullPath, std::ios::in);
std::ifstream fileStream(fullPath.string(), std::ios::in);
if (!fileStream)
{
@@ -85,7 +86,8 @@ writeFileContents(
using namespace boost::filesystem;
using namespace boost::system::errc;
ofstream fileStream(destPath, std::ios::out | std::ios::trunc);
std::ofstream fileStream(
destPath.string(), std::ios::out | std::ios::trunc);
if (!fileStream)
{

View File

@@ -234,6 +234,28 @@ class Vault_test : public beast::unit_test::suite
env(tx, ter{tecNO_PERMISSION});
}
{
testcase(prefix + " fail to withdraw to zero destination");
auto tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(1000)});
tx[sfDestination] = "0";
env(tx, ter(temMALFORMED));
}
{
testcase(
prefix +
" fail to withdraw with tag but without destination");
auto tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(1000)});
tx[sfDestinationTag] = "0";
env(tx, ter(temMALFORMED));
}
if (!asset.raw().native())
{
testcase(
@@ -1335,6 +1357,7 @@ class Vault_test : public beast::unit_test::suite
struct CaseArgs
{
bool enableClawback = true;
bool requireAuth = true;
};
auto testCase = [this](
@@ -1356,16 +1379,20 @@ class Vault_test : public beast::unit_test::suite
Vault vault{env};
MPTTester mptt{env, issuer, mptInitNoFund};
auto const none = LedgerSpecificFlags(0);
mptt.create(
{.flags = tfMPTCanTransfer | tfMPTCanLock |
(args.enableClawback ? lsfMPTCanClawback
: LedgerSpecificFlags(0)) |
tfMPTRequireAuth});
(args.enableClawback ? tfMPTCanClawback : none) |
(args.requireAuth ? tfMPTRequireAuth : none)});
PrettyAsset asset = mptt.issuanceID();
mptt.authorize({.account = owner});
mptt.authorize({.account = issuer, .holder = owner});
mptt.authorize({.account = depositor});
mptt.authorize({.account = issuer, .holder = depositor});
if (args.requireAuth)
{
mptt.authorize({.account = issuer, .holder = owner});
mptt.authorize({.account = issuer, .holder = depositor});
}
env(pay(issuer, depositor, asset(1000)));
env.close();
@@ -1514,6 +1541,100 @@ class Vault_test : public beast::unit_test::suite
}
});
testCase(
[this](
Env& env,
Account const& issuer,
Account const& owner,
Account const& depositor,
PrettyAsset const& asset,
Vault& vault,
MPTTester& mptt) {
testcase(
"MPT 3rd party without MPToken cannot be withdrawal "
"destination");
auto [tx, keylet] =
vault.create({.owner = owner, .asset = asset});
env(tx);
env.close();
tx = vault.deposit(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(100)});
env(tx);
env.close();
{
// Set destination to 3rd party without MPToken
Account charlie{"charlie"};
env.fund(XRP(1000), charlie);
env.close();
tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(100)});
tx[sfDestination] = charlie.human();
env(tx, ter(tecNO_AUTH));
}
},
{.requireAuth = false});
testCase(
[this](
Env& env,
Account const& issuer,
Account const& owner,
Account const& depositor,
PrettyAsset const& asset,
Vault& vault,
MPTTester& mptt) {
testcase("MPT depositor without MPToken cannot withdraw");
auto [tx, keylet] =
vault.create({.owner = owner, .asset = asset});
env(tx);
env.close();
tx = vault.deposit(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(1000)});
env(tx);
env.close();
{
// Remove depositor's MPToken and withdraw will fail
mptt.authorize(
{.account = depositor, .flags = tfMPTUnauthorize});
env.close();
auto const mptoken =
env.le(keylet::mptoken(mptt.issuanceID(), depositor));
BEAST_EXPECT(mptoken == nullptr);
tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(100)});
env(tx, ter(tecNO_AUTH));
}
{
// Restore depositor's MPToken and withdraw will succeed
mptt.authorize({.account = depositor});
env.close();
tx = vault.withdraw(
{.depositor = depositor,
.id = keylet.key,
.amount = asset(100)});
env(tx);
}
},
{.requireAuth = false});
testCase([this](
Env& env,
Account const& issuer,
@@ -1803,6 +1924,7 @@ class Vault_test : public beast::unit_test::suite
PrettyAsset const asset = issuer["IOU"];
env.trust(asset(1000), owner);
env.trust(asset(1000), charlie);
env(pay(issuer, owner, asset(200)));
env(rate(issuer, 1.25));
env.close();
@@ -2118,6 +2240,79 @@ class Vault_test : public beast::unit_test::suite
env.close();
});
testCase([&, this](
Env& env,
Account const& owner,
Account const& issuer,
Account const& charlie,
auto,
Vault& vault,
PrettyAsset const& asset,
auto&&...) {
testcase("IOU no trust line to 3rd party");
auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
env(tx);
env.close();
env(vault.deposit(
{.depositor = owner, .id = keylet.key, .amount = asset(100)}));
env.close();
Account const erin{"erin"};
env.fund(XRP(1000), erin);
env.close();
// Withdraw to 3rd party without trust line
auto const tx1 = [&](ripple::Keylet keylet) {
auto tx = vault.withdraw(
{.depositor = owner,
.id = keylet.key,
.amount = asset(10)});
tx[sfDestination] = erin.human();
return tx;
}(keylet);
env(tx1, ter{tecNO_LINE});
});
testCase([&, this](
Env& env,
Account const& owner,
Account const& issuer,
Account const& charlie,
auto,
Vault& vault,
PrettyAsset const& asset,
auto&&...) {
testcase("IOU no trust line to depositor");
auto [tx, keylet] = vault.create({.owner = owner, .asset = asset});
env(tx);
env.close();
// reset limit, so deposit of all funds will delete the trust line
env.trust(asset(0), owner);
env.close();
env(vault.deposit(
{.depositor = owner, .id = keylet.key, .amount = asset(200)}));
env.close();
auto trustline =
env.le(keylet::line(owner, asset.raw().get<Issue>()));
BEAST_EXPECT(trustline == nullptr);
// Withdraw without trust line, will succeed
auto const tx1 = [&](ripple::Keylet keylet) {
auto tx = vault.withdraw(
{.depositor = owner,
.id = keylet.key,
.amount = asset(10)});
return tx;
}(keylet);
env(tx1);
});
testCase([&, this](
Env& env,
Account const& owner,

View File

@@ -33,6 +33,7 @@
#include <boost/asio.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <boost/beast/core/flat_buffer.hpp>
#include <boost/beast/http.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/beast/version.hpp>

View File

@@ -131,6 +131,9 @@ public:
BEAST_EXPECT(jv.isMember(jss::id) && jv[jss::id] == 5);
}
BEAST_EXPECT(jv[jss::result][jss::ledger_index] == 2);
BEAST_EXPECT(
jv[jss::result][jss::network_id] ==
env.app().config().NETWORK_ID);
}
{
@@ -139,7 +142,8 @@ public:
// Check stream update
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
return jv[jss::ledger_index] == 3;
return jv[jss::ledger_index] == 3 &&
jv[jss::network_id] == env.app().config().NETWORK_ID;
}));
}
@@ -149,7 +153,8 @@ public:
// Check stream update
BEAST_EXPECT(wsc->findMsg(5s, [&](auto const& jv) {
return jv[jss::ledger_index] == 4;
return jv[jss::ledger_index] == 4 &&
jv[jss::network_id] == env.app().config().NETWORK_ID;
}));
}
@@ -509,6 +514,11 @@ public:
if (!jv.isMember(jss::validated_hash))
return false;
uint32_t netID = env.app().config().NETWORK_ID;
if (!jv.isMember(jss::network_id) ||
jv[jss::network_id] != netID)
return false;
// Certain fields are only added on a flag ledger.
bool const isFlagLedger =
(env.closed()->info().seq + 1) % 256 == 0;
@@ -583,6 +593,7 @@ public:
jv[jss::streams][0u] = "ledger";
jr = env.rpc("json", "subscribe", to_string(jv))[jss::result];
BEAST_EXPECT(jr[jss::status] == "success");
BEAST_EXPECT(jr[jss::network_id] == env.app().config().NETWORK_ID);
jr = env.rpc("json", "unsubscribe", to_string(jv))[jss::result];
BEAST_EXPECT(jr[jss::status] == "success");

View File

@@ -681,7 +681,7 @@ class ServerStatus_test : public beast::unit_test::suite,
resp["Upgrade"] == "websocket");
BEAST_EXPECT(
resp.find("Connection") != resp.end() &&
resp["Connection"] == "upgrade");
resp["Connection"] == "Upgrade");
}
void
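The expected value above changes from "upgrade" to "Upgrade" to match what the server now sends. Since Connection header values are case-insensitive tokens (RFC 7230), a client-side check is more robust when written case-insensitively; a small illustrative sketch using `boost::beast::iequals` (not how this test suite does it, and `isUpgrade` is hypothetical):
```
#include <boost/beast/core/string.hpp>

// Compare the Connection header value without caring about case.
bool isUpgrade(boost::beast::string_view connection)
{
    return boost::beast::iequals(connection, "upgrade");
}
```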

View File

@@ -26,6 +26,8 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <boost/filesystem.hpp>
#include <fstream>
namespace ripple {
namespace test {
namespace detail {

View File

@@ -79,7 +79,7 @@
#include <chrono>
#include <condition_variable>
#include <cstring>
#include <iostream>
#include <fstream>
#include <limits>
#include <mutex>
#include <optional>

View File

@@ -39,7 +39,7 @@
#include <google/protobuf/stubs/common.h>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <stdexcept>
#include <utility>

View File

@@ -2415,6 +2415,7 @@ NetworkOPsImp::pubValidation(std::shared_ptr<STValidation> const& val)
jvObj[jss::flags] = val->getFlags();
jvObj[jss::signing_time] = *(*val)[~sfSigningTime];
jvObj[jss::data] = strHex(val->getSerializer().slice());
jvObj[jss::network_id] = app_.config().NETWORK_ID;
if (auto version = (*val)[~sfServerVersion])
jvObj[jss::server_version] = std::to_string(*version);
@@ -3148,6 +3149,8 @@ NetworkOPsImp::pubLedger(std::shared_ptr<ReadView const> const& lpAccepted)
jvObj[jss::ledger_time] = Json::Value::UInt(
lpAccepted->info().closeTime.time_since_epoch().count());
jvObj[jss::network_id] = app_.config().NETWORK_ID;
if (!lpAccepted->rules().enabled(featureXRPFees))
jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped();
@@ -4208,6 +4211,7 @@ NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult)
jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash);
jvResult[jss::ledger_time] = Json::Value::UInt(
lpClosed->info().closeTime.time_since_epoch().count());
jvResult[jss::network_id] = app_.config().NETWORK_ID;
if (!lpClosed->rules().enabled(featureXRPFees))
jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED;
jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped();

View File

@@ -366,14 +366,14 @@ escrowCreatePreclaimHelper<MPTIssue>(
// authorized
auto const& mptIssue = amount.get<MPTIssue>();
if (auto const ter =
requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth);
requireAuth(ctx.view, mptIssue, account, AuthType::WeakAuth);
ter != tesSUCCESS)
return ter;
// If the issuer has requireAuth set, check if the destination is
// authorized
if (auto const ter =
requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth);
requireAuth(ctx.view, mptIssue, dest, AuthType::WeakAuth);
ter != tesSUCCESS)
return ter;
@@ -831,7 +831,7 @@ escrowFinishPreclaimHelper<MPTIssue>(
// authorized
auto const& mptIssue = amount.get<MPTIssue>();
if (auto const ter =
requireAuth(ctx.view, mptIssue, dest, MPTAuthType::WeakAuth);
requireAuth(ctx.view, mptIssue, dest, AuthType::WeakAuth);
ter != tesSUCCESS)
return ter;
@@ -1447,7 +1447,7 @@ escrowCancelPreclaimHelper<MPTIssue>(
// authorized
auto const& mptIssue = amount.get<MPTIssue>();
if (auto const ter =
requireAuth(ctx.view, mptIssue, account, MPTAuthType::WeakAuth);
requireAuth(ctx.view, mptIssue, account, AuthType::WeakAuth);
ter != tesSUCCESS)
return ter;

View File

@@ -52,9 +52,19 @@ VaultWithdraw::preflight(PreflightContext const& ctx)
return temBAD_AMOUNT;
if (auto const destination = ctx.tx[~sfDestination];
destination && *destination == beast::zero)
destination.has_value())
{
JLOG(ctx.j.debug()) << "VaultWithdraw: zero/empty destination account.";
if (*destination == beast::zero)
{
JLOG(ctx.j.debug())
<< "VaultWithdraw: zero/empty destination account.";
return temMALFORMED;
}
}
else if (ctx.tx.isFieldPresent(sfDestinationTag))
{
JLOG(ctx.j.debug()) << "VaultWithdraw: sfDestinationTag is set but "
"sfDestination is not";
return temMALFORMED;
}
@@ -123,33 +133,39 @@ VaultWithdraw::preclaim(PreclaimContext const& ctx)
// Withdrawal to a 3rd party destination account is essentially a transfer,
// via shares in the vault. Enforce all the usual asset transfer checks.
AuthType authType = AuthType::Legacy;
if (account != dstAcct)
{
auto const sleDst = ctx.view.read(keylet::account(dstAcct));
if (sleDst == nullptr)
return tecNO_DST;
if (sleDst->getFlags() & lsfRequireDestTag)
if (sleDst->isFlag(lsfRequireDestTag) &&
!ctx.tx.isFieldPresent(sfDestinationTag))
return tecDST_TAG_NEEDED; // Cannot send without a tag
if (sleDst->getFlags() & lsfDepositAuth)
if (sleDst->isFlag(lsfDepositAuth))
{
if (!ctx.view.exists(keylet::depositPreauth(dstAcct, account)))
return tecNO_PERMISSION;
}
// The destination account must have consented to receive the asset by
// creating a RippleState or MPToken
authType = AuthType::StrongAuth;
}
// Destination MPToken must exist (if asset is an MPT)
if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct);
// Destination MPToken (for an MPT) or trust line (for an IOU) must exist
// if not sending to Account.
if (auto const ter = requireAuth(ctx.view, vaultAsset, dstAcct, authType);
!isTesSuccess(ter))
return ter;
// Cannot withdraw from a Vault an Asset frozen for the destination account
if (isFrozen(ctx.view, dstAcct, vaultAsset))
return vaultAsset.holds<Issue>() ? tecFROZEN : tecLOCKED;
if (auto const ret = checkFrozen(ctx.view, dstAcct, vaultAsset))
return ret;
if (isFrozen(ctx.view, account, vaultShare))
return tecLOCKED;
if (auto const ret = checkFrozen(ctx.view, account, vaultShare))
return ret;
return tesSUCCESS;
}

View File

@@ -175,6 +175,29 @@ isFrozen(
asset.value());
}
[[nodiscard]] inline TER
checkFrozen(ReadView const& view, AccountID const& account, Issue const& issue)
{
return isFrozen(view, account, issue) ? (TER)tecFROZEN : (TER)tesSUCCESS;
}
[[nodiscard]] inline TER
checkFrozen(
ReadView const& view,
AccountID const& account,
MPTIssue const& mptIssue)
{
return isFrozen(view, account, mptIssue) ? (TER)tecLOCKED : (TER)tesSUCCESS;
}
[[nodiscard]] inline TER
checkFrozen(ReadView const& view, AccountID const& account, Asset const& asset)
{
return std::visit(
[&](auto const& issue) { return checkFrozen(view, account, issue); },
asset.value());
}
[[nodiscard]] bool
isAnyFrozen(
ReadView const& view,
@@ -725,19 +748,40 @@ transferXRP(
STAmount const& amount,
beast::Journal j);
/* Check if MPToken exists:
* - StrongAuth - before checking lsfMPTRequireAuth is set
* - WeakAuth - after checking if lsfMPTRequireAuth is set
/* Check if MPToken (for MPT) or trust line (for IOU) exists:
* - StrongAuth - before checking if authorization is required
* - WeakAuth
* for MPT - after checking lsfMPTRequireAuth flag
* for IOU - do not check if trust line exists
* - Legacy
* for MPT - before checking lsfMPTRequireAuth flag i.e. same as StrongAuth
* for IOU - do not check if trust line exists i.e. same as WeakAuth
*/
enum class MPTAuthType : bool { StrongAuth = true, WeakAuth = false };
enum class AuthType { StrongAuth, WeakAuth, Legacy };
/** Check if the account lacks required authorization.
*
* Return tecNO_AUTH or tecNO_LINE if it does
* and tesSUCCESS otherwise.
* Return tecNO_AUTH or tecNO_LINE if it does
* and tesSUCCESS otherwise.
*
* If StrongAuth then return tecNO_LINE if the RippleState doesn't exist. Return
* tecNO_AUTH if lsfRequireAuth is set on the issuer's AccountRoot, and the
* RippleState does exist, and the RippleState is not authorized.
*
* If WeakAuth then return tecNO_AUTH if lsfRequireAuth is set, and the
* RippleState exists, and is not authorized. Return tecNO_LINE if
* lsfRequireAuth is set and the RippleState doesn't exist. Consequently, if
* WeakAuth and lsfRequireAuth is *not* set, this function will return
* tesSUCCESS even if RippleState does *not* exist.
*
* The default "Legacy" auth type is equivalent to WeakAuth.
*/
[[nodiscard]] TER
requireAuth(ReadView const& view, Issue const& issue, AccountID const& account);
requireAuth(
ReadView const& view,
Issue const& issue,
AccountID const& account,
AuthType authType = AuthType::Legacy);
/** Check if the account lacks required authorization.
*
@@ -751,32 +795,33 @@ requireAuth(ReadView const& view, Issue const& issue, AccountID const& account);
* purely defensive, as we currently do not allow such vaults to be created.
*
* If StrongAuth then return tecNO_AUTH if MPToken doesn't exist or
* lsfMPTRequireAuth is set and MPToken is not authorized. If WeakAuth then
* return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken doesn't exist or is
* not authorized (explicitly or via credentials, if DomainID is set in
* MPTokenIssuance). Consequently, if WeakAuth and lsfMPTRequireAuth is *not*
* set, this function will return true even if MPToken does *not* exist.
* lsfMPTRequireAuth is set and MPToken is not authorized.
*
* If WeakAuth then return tecNO_AUTH if lsfMPTRequireAuth is set and MPToken
* doesn't exist or is not authorized (explicitly or via credentials, if
* DomainID is set in MPTokenIssuance). Consequently, if WeakAuth and
* lsfMPTRequireAuth is *not* set, this function will return tesSUCCESS even if
* MPToken does *not* exist.
*
* The default "Legacy" auth type is equivalent to StrongAuth.
*/
[[nodiscard]] TER
requireAuth(
ReadView const& view,
MPTIssue const& mptIssue,
AccountID const& account,
MPTAuthType authType = MPTAuthType::StrongAuth,
AuthType authType = AuthType::Legacy,
int depth = 0);
[[nodiscard]] TER inline requireAuth(
ReadView const& view,
Asset const& asset,
AccountID const& account,
MPTAuthType authType = MPTAuthType::StrongAuth)
AuthType authType = AuthType::Legacy)
{
return std::visit(
[&]<ValidIssueType TIss>(TIss const& issue_) {
if constexpr (std::is_same_v<TIss, Issue>)
return requireAuth(view, issue_, account);
else
return requireAuth(view, issue_, account, authType);
return requireAuth(view, issue_, account, authType);
},
asset.value());
}
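Putting the pieces above together, a sketch of how a transactor's preclaim can use the new `AuthType` and `checkFrozen` helpers. This paraphrases the `VaultWithdraw::preclaim` change earlier in this commit; `preclaimSketch` and its parameters are hypothetical, while `requireAuth`, `checkFrozen`, `AuthType`, and the `TER` codes come from this diff (rippled headers assumed):
```
TER
preclaimSketch(
    ReadView const& view,
    AccountID const& account,
    AccountID const& dstAcct,
    Asset const& vaultAsset)
{
    // Paying out to a third party is effectively a transfer, so the
    // destination must have consented to hold the asset: StrongAuth demands
    // an existing trust line (IOU) or MPToken (MPT).
    AuthType const authType =
        account != dstAcct ? AuthType::StrongAuth : AuthType::Legacy;

    if (auto const ter = requireAuth(view, vaultAsset, dstAcct, authType);
        !isTesSuccess(ter))
        return ter;  // tecNO_LINE or tecNO_AUTH, per the rules above

    // checkFrozen folds the Issue/MPT distinction into a single call:
    // tecFROZEN for a frozen IOU, tecLOCKED for a locked MPT.
    if (auto const ter = checkFrozen(view, dstAcct, vaultAsset))
        return ter;

    return tesSUCCESS;
}
```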

View File

@@ -504,8 +504,8 @@ accountHolds(
if (zeroIfUnauthorized == ahZERO_IF_UNAUTHORIZED &&
view.rules().enabled(featureSingleAssetVault))
{
if (auto const err = requireAuth(
view, mptIssue, account, MPTAuthType::StrongAuth);
if (auto const err =
requireAuth(view, mptIssue, account, AuthType::StrongAuth);
!isTesSuccess(err))
amount.clear(mptIssue);
}
@@ -2296,15 +2296,27 @@ transferXRP(
}
TER
requireAuth(ReadView const& view, Issue const& issue, AccountID const& account)
requireAuth(
ReadView const& view,
Issue const& issue,
AccountID const& account,
AuthType authType)
{
if (isXRP(issue) || issue.account == account)
return tesSUCCESS;
auto const trustLine =
view.read(keylet::line(account, issue.account, issue.currency));
// If account has no line, and this is a strong check, fail
if (!trustLine && authType == AuthType::StrongAuth)
return tecNO_LINE;
// If this is a weak or legacy check, or if the account has a line, fail if
// auth is required and not set on the line
if (auto const issuerAccount = view.read(keylet::account(issue.account));
issuerAccount && (*issuerAccount)[sfFlags] & lsfRequireAuth)
{
if (auto const trustLine =
view.read(keylet::line(account, issue.account, issue.currency)))
if (trustLine)
return ((*trustLine)[sfFlags] &
((account > issue.account) ? lsfLowAuth : lsfHighAuth))
? tesSUCCESS
@@ -2320,7 +2332,7 @@ requireAuth(
ReadView const& view,
MPTIssue const& mptIssue,
AccountID const& account,
MPTAuthType authType,
AuthType authType,
int depth)
{
auto const mptID = keylet::mptIssuance(mptIssue.getMptID());
@@ -2355,7 +2367,7 @@ requireAuth(
if (auto const err = std::visit(
[&]<ValidIssueType TIss>(TIss const& issue) {
if constexpr (std::is_same_v<TIss, Issue>)
return requireAuth(view, issue, account);
return requireAuth(view, issue, account, authType);
else
return requireAuth(
view, issue, account, authType, depth + 1);
@@ -2370,7 +2382,8 @@ requireAuth(
auto const sleToken = view.read(mptokenID);
// if account has no MPToken, fail
if (!sleToken && authType == MPTAuthType::StrongAuth)
if (!sleToken &&
(authType == AuthType::StrongAuth || authType == AuthType::Legacy))
return tecNO_AUTH;
// Note, this check is not amendment-gated because DomainID will be always