diff --git a/.github/actions/cmake/action.yml b/.github/actions/cmake/action.yml index b40df6a2..b7d5db65 100644 --- a/.github/actions/cmake/action.yml +++ b/.github/actions/cmake/action.yml @@ -64,6 +64,7 @@ runs: -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ -DCMAKE_BUILD_TYPE="${BUILD_TYPE}" \ "${SANITIZER_OPTION}" \ + -Dtests=ON \ -Dintegration_tests="${INTEGRATION_TESTS}" \ -Dbenchmark="${BENCHMARK}" \ -Dcoverage="${COVERAGE}" \ diff --git a/.github/actions/conan/action.yml b/.github/actions/conan/action.yml index 52ed03dd..bd931f79 100644 --- a/.github/actions/conan/action.yml +++ b/.github/actions/conan/action.yml @@ -17,10 +17,6 @@ inputs: description: Build type for third-party libraries and clio. Could be 'Release', 'Debug' required: true default: "Release" - build_benchmark: - description: Whether to build benchmark tests - required: true - default: "true" runs: using: composite @@ -33,13 +29,10 @@ runs: shell: bash env: CONAN_BUILD_OPTION: "${{ inputs.force_conan_source_build == 'true' && '*' || 'missing' }}" - BUILD_BENCHMARK: "${{ inputs.build_benchmark == 'true' && 'True' || 'False' }}" run: | conan \ install . \ -of build \ -b "$CONAN_BUILD_OPTION" \ -s "build_type=${{ inputs.build_type }}" \ - -o "&:tests=True" \ - -o "&:benchmark=${BUILD_BENCHMARK}" \ --profile:all "${{ inputs.conan_profile }}" diff --git a/.github/scripts/conan/generate_matrix.py b/.github/scripts/conan/generate_matrix.py index 7aae8df7..3b3d111c 100755 --- a/.github/scripts/conan/generate_matrix.py +++ b/.github/scripts/conan/generate_matrix.py @@ -3,7 +3,7 @@ import itertools import json LINUX_OS = ["heavy", "heavy-arm64"] -LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }'] +LINUX_CONTAINERS = ['{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }'] LINUX_COMPILERS = ["gcc", "clang"] MACOS_OS = ["macos15"] diff --git a/.github/scripts/conan/init.sh b/.github/scripts/conan/init.sh index 929809d7..5875d57b 100755 --- a/.github/scripts/conan/init.sh +++ b/.github/scripts/conan/init.sh @@ -22,7 +22,7 @@ SANITIZER_TEMPLATE_FILE="$REPO_DIR/docker/ci/conan/sanitizer_template.profile" rm -rf "$CONAN_DIR" -conan remote add --index 0 ripple https://conan.ripplex.io +conan remote add --index 0 xrplf https://conan.ripplex.io cp "$REPO_DIR/docker/ci/conan/global.conf" "$CONAN_DIR/global.conf" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ea1208cc..3300aea8 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -45,7 +45,7 @@ jobs: build_type: [Release, Debug] container: [ - '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }', + '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }', ] static: [true] @@ -73,7 +73,7 @@ jobs: uses: ./.github/workflows/build_impl.yml with: runs_on: heavy - container: '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }' conan_profile: gcc build_type: Debug disable_cache: false @@ -91,7 +91,7 @@ jobs: uses: ./.github/workflows/build_impl.yml with: runs_on: heavy - container: '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }' conan_profile: gcc build_type: Release disable_cache: false @@ -107,7 +107,7 @@ jobs: needs: build-and-test runs-on: heavy 
container: - image: ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7 + image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/check_libxrpl.yml b/.github/workflows/check_libxrpl.yml index 5e8314ec..ee1b36c2 100644 --- a/.github/workflows/check_libxrpl.yml +++ b/.github/workflows/check_libxrpl.yml @@ -17,7 +17,7 @@ jobs: name: Build Clio / `libXRPL ${{ github.event.client_payload.version }}` runs-on: heavy container: - image: ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7 + image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d steps: - uses: actions/checkout@v4 @@ -38,7 +38,7 @@ jobs: - name: Update conan lockfile shell: bash run: | - conan lock create . -o '&:tests=True' -o '&:benchmark=True' --profile:all ${{ env.CONAN_PROFILE }} + conan lock create . --profile:all ${{ env.CONAN_PROFILE }} - name: Run conan uses: ./.github/actions/conan @@ -67,7 +67,7 @@ jobs: needs: build runs-on: heavy container: - image: ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7 + image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d steps: - uses: actions/download-artifact@v5 diff --git a/.github/workflows/clang-tidy.yml b/.github/workflows/clang-tidy.yml index d8696014..a43f55f2 100644 --- a/.github/workflows/clang-tidy.yml +++ b/.github/workflows/clang-tidy.yml @@ -24,7 +24,7 @@ jobs: clang_tidy: runs-on: heavy container: - image: ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7 + image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d permissions: contents: write diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index a4fad118..f0f2142e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -14,7 +14,7 @@ jobs: build: runs-on: ubuntu-latest container: - image: ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7 + image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d steps: - name: Checkout diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index ca66255c..fe631614 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -39,17 +39,17 @@ jobs: conan_profile: gcc build_type: Release static: true - container: '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }' - os: heavy conan_profile: gcc build_type: Debug static: true - container: '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }' - os: heavy conan_profile: gcc.ubsan build_type: Release static: false - container: '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }' uses: ./.github/workflows/build_and_test.yml with: @@ -72,7 +72,7 @@ jobs: include: - os: heavy conan_profile: clang - container: '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }' static: true - os: macos15 conan_profile: apple-clang diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml index 8cc38974..770d1b6d 100644 --- a/.github/workflows/pre-commit.yml +++ b/.github/workflows/pre-commit.yml @@ 
-11,4 +11,4 @@ jobs: uses: XRPLF/actions/.github/workflows/pre-commit.yml@afbcbdafbe0ce5439492fb87eda6441371086386 with: runs_on: heavy - container: '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 214dd451..bd93ad8e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,7 +29,7 @@ jobs: conan_profile: gcc build_type: Release static: true - container: '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }' uses: ./.github/workflows/build_and_test.yml with: diff --git a/.github/workflows/release_impl.yml b/.github/workflows/release_impl.yml index 903a3cd5..784be356 100644 --- a/.github/workflows/release_impl.yml +++ b/.github/workflows/release_impl.yml @@ -42,7 +42,7 @@ jobs: release: runs-on: heavy container: - image: ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7 + image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d env: GH_REPO: ${{ github.repository }} GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/sanitizers.yml b/.github/workflows/sanitizers.yml index a092073e..26cabdcb 100644 --- a/.github/workflows/sanitizers.yml +++ b/.github/workflows/sanitizers.yml @@ -44,7 +44,7 @@ jobs: uses: ./.github/workflows/build_and_test.yml with: runs_on: heavy - container: '{ "image": "ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7" }' + container: '{ "image": "ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d" }' disable_cache: true conan_profile: ${{ matrix.compiler }}${{ matrix.sanitizer_ext }} build_type: ${{ matrix.build_type }} diff --git a/.github/workflows/update_docker_ci.yml b/.github/workflows/update_docker_ci.yml index 5f642d68..d614a475 100644 --- a/.github/workflows/update_docker_ci.yml +++ b/.github/workflows/update_docker_ci.yml @@ -56,7 +56,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0 with: files: "docker/compilers/gcc/**" @@ -94,7 +94,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0 with: files: "docker/compilers/gcc/**" @@ -132,7 +132,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0 with: files: "docker/compilers/gcc/**" @@ -183,7 +183,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0 with: files: "docker/compilers/clang/**" @@ -219,7 +219,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0 with: files: "docker/tools/**" @@ -250,7 +250,7 @@ jobs: - name: Get changed files id: changed-files - uses: 
tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0 with: files: "docker/tools/**" @@ -281,7 +281,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@ed68ef82c095e0d48ec87eccea555d944a631a4c # v46.0.5 + uses: tj-actions/changed-files@24d32ffd492484c1d75e0c0b894501ddb9d30d62 # v47.0.0 with: files: "docker/tools/**" diff --git a/.github/workflows/upload_conan_deps.yml b/.github/workflows/upload_conan_deps.yml index eb1d620e..bc302ec3 100644 --- a/.github/workflows/upload_conan_deps.yml +++ b/.github/workflows/upload_conan_deps.yml @@ -60,6 +60,7 @@ jobs: strategy: fail-fast: false matrix: ${{ fromJson(needs.generate-matrix.outputs.matrix) }} + max-parallel: 10 runs-on: ${{ matrix.os }} container: ${{ matrix.container != '' && fromJson(matrix.container) || null }} @@ -94,8 +95,8 @@ jobs: - name: Login to Conan if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' - run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} ripple ${{ secrets.CONAN_USERNAME }} + run: conan remote login -p ${{ secrets.CONAN_PASSWORD }} xrplf ${{ secrets.CONAN_USERNAME }} - name: Upload Conan packages if: github.repository_owner == 'XRPLF' && github.event_name != 'pull_request' && github.event_name != 'schedule' - run: conan upload "*" -r=ripple --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }} + run: conan upload "*" -r=xrplf --confirm ${{ github.event.inputs.force_upload == 'true' && '--force' || '' }} diff --git a/.github/workflows/upload_coverage_report.yml b/.github/workflows/upload_coverage_report.yml index e2e72e77..86a4eaa7 100644 --- a/.github/workflows/upload_coverage_report.yml +++ b/.github/workflows/upload_coverage_report.yml @@ -25,7 +25,7 @@ jobs: - name: Upload coverage report if: ${{ hashFiles('build/coverage_report.xml') != '' }} - uses: codecov/codecov-action@fdcc8476540edceab3de004e990f80d881c6cc00 # v5.5.0 + uses: codecov/codecov-action@5a1091511ad55cbe89839c7260b706298ca349f7 # v5.5.1 with: files: build/coverage_report.xml fail_ci_if_error: true diff --git a/benchmarks/util/log/LoggerBenchmark.cpp b/benchmarks/util/log/LoggerBenchmark.cpp index cc36c0da..0b7467e3 100644 --- a/benchmarks/util/log/LoggerBenchmark.cpp +++ b/benchmarks/util/log/LoggerBenchmark.cpp @@ -42,7 +42,7 @@ using namespace util; static constexpr auto kLOG_FORMAT = "%Y-%m-%d %H:%M:%S.%f %^%3!l:%n%$ - %v"; struct BenchmarkLoggingInitializer { - static std::shared_ptr + [[nodiscard]] static std::shared_ptr createFileSink(LogService::FileLoggingParams const& params) { return LogService::createFileSink(params, kLOG_FORMAT); @@ -72,8 +72,6 @@ uniqueLogDir() static void benchmarkConcurrentFileLogging(benchmark::State& state) { - spdlog::drop_all(); - auto const numThreads = static_cast(state.range(0)); auto const messagesPerThread = static_cast(state.range(1)); @@ -84,7 +82,9 @@ benchmarkConcurrentFileLogging(benchmark::State& state) state.PauseTiming(); std::filesystem::create_directories(logDir); - spdlog::init_thread_pool(8192, 1); + static constexpr size_t kQUEUE_SIZE = 8192; + static constexpr size_t kTHREAD_COUNT = 1; + spdlog::init_thread_pool(kQUEUE_SIZE, kTHREAD_COUNT); auto fileSink = BenchmarkLoggingInitializer::createFileSink({ .logDir = logDir, diff --git a/conanfile.py b/conanfile.py index 90dfb8ac..60bdeb0a 100644 --- a/conanfile.py +++ b/conanfile.py @@ -9,10 +9,7 @@ class ClioConan(ConanFile): url = 
'https://github.com/xrplf/clio' description = 'Clio RPC server' settings = 'os', 'compiler', 'build_type', 'arch' - options = { - 'tests': [True, False], - 'benchmark': [True, False], - } + options = {} requires = [ 'boost/1.83.0', @@ -28,9 +25,6 @@ class ClioConan(ConanFile): ] default_options = { - 'tests': False, - 'benchmark': False, - 'xrpl/*:tests': False, 'xrpl/*:rocksdb': False, 'cassandra-cpp-driver/*:shared': False, @@ -51,10 +45,8 @@ class ClioConan(ConanFile): ) def requirements(self): - if self.options.tests or self.options.integration_tests: - self.requires('gtest/1.14.0') - if self.options.benchmark: - self.requires('benchmark/1.9.4') + self.requires('gtest/1.14.0') + self.requires('benchmark/1.9.4') def configure(self): if self.settings.compiler == 'apple-clang': @@ -70,8 +62,6 @@ class ClioConan(ConanFile): def generate(self): tc = CMakeToolchain(self) - for option_name, option_value in self.options.items(): - tc.variables[option_name] = option_value tc.generate() def build(self): diff --git a/docker/ci/Dockerfile b/docker/ci/Dockerfile index 96b0f7ed..22fc917c 100644 --- a/docker/ci/Dockerfile +++ b/docker/ci/Dockerfile @@ -115,7 +115,7 @@ COPY --from=clio-tools \ WORKDIR /root # Setup conan -RUN conan remote add --index 0 ripple https://conan.ripplex.io +RUN conan remote add --index 0 xrplf https://conan.ripplex.io WORKDIR /root/.conan2 COPY conan/global.conf ./global.conf diff --git a/docker/develop/compose.yaml b/docker/develop/compose.yaml index af12d64b..b092a6ee 100644 --- a/docker/develop/compose.yaml +++ b/docker/develop/compose.yaml @@ -1,6 +1,6 @@ services: clio_develop: - image: ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7 + image: ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d volumes: - clio_develop_conan_data:/root/.conan2/p - clio_develop_ccache:/root/.ccache diff --git a/docs/build-clio.md b/docs/build-clio.md index e61943e7..5118b650 100644 --- a/docs/build-clio.md +++ b/docs/build-clio.md @@ -90,7 +90,7 @@ core.upload:parallel={{os.cpu_count()}} Make sure artifactory is setup with Conan. ```sh -conan remote add --index 0 ripple https://conan.ripplex.io +conan remote add --index 0 xrplf https://conan.ripplex.io ``` Now you should be able to download the prebuilt dependencies (including `xrpl` package) on supported platforms. @@ -104,79 +104,100 @@ It is implicitly used when running `conan` commands, you don't need to specify i You have to update this file every time you add a new dependency or change a revision or version of an existing dependency. -To do that, run the following command in the repository root: +> [!NOTE] +> Conan uses the local cache by default when creating a lockfile. +> +> To ensure that lockfile creation works the same way on all developer machines, you should clear the local cache before creating a new lockfile. + +To create a new lockfile, run the following commands in the repository root: ```bash -conan lock create . -o '&:tests=True' -o '&:benchmark=True' +conan remove '*' --confirm +rm conan.lock +# This ensures that the xrplf remote is the first to be consulted +conan remote add --force --index 0 xrplf https://conan.ripplex.io +conan lock create . ``` +> [!NOTE] +> If some dependencies are exclusive to a particular OS, you may need to re-run the last command for them, adding `--profile:all <profile>`. + ## Building Clio -Navigate to Clio's root directory and run: 1.
Navigate to Clio's root directory and run: -```sh -mkdir build && cd build -# You can also specify profile explicitly by adding `--profile:all ` -conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -# You can also add -GNinja to use Ninja build system instead of Make -cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -cmake --build . --parallel 8 # or without the number if you feel extra adventurous -``` + ```sh + mkdir build && cd build + ``` -> [!TIP] -> You can omit the `-o '&:tests=True'` if you don't want to build `clio_tests`. +2. Install dependencies through conan. -If successful, `conan install` will find the required packages and `cmake` will do the rest. You should see `clio_server` and `clio_tests` in the `build` directory (the current directory). + ```sh + conan install .. --output-folder . --build missing --settings build_type=Release + ``` -> [!TIP] -> To generate a Code Coverage report, include `-o '&:coverage=True'` in the `conan install` command above, along with `-o '&:tests=True'` to enable tests. -> After running the `cmake` commands, execute `make clio_tests-ccov`. -> The coverage report will be found at `clio_tests-llvm-cov/index.html`. + > You can add `--profile:all <profile>` to choose a specific conan profile. - +3. Configure and generate build files with CMake. + + ```sh + cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. + ``` + + > You can add `-GNinja` to use the Ninja build system (instead of Make). + +4. Now, you can build all targets or specific ones: + + ```sh + # builds all targets + cmake --build . --parallel 8 + # builds only clio_server target + cmake --build . --parallel 8 --target clio_server + ``` + + You should see `clio_server` and `clio_tests` in the current directory. > [!NOTE] > If you've built Clio before and the build is now failing, it's likely due to updated dependencies. Try deleting the build folder and then rerunning the Conan and CMake commands mentioned above. +### CMake options + +There are several CMake options you can use to customize the build: + +| CMake Option | Default | CMake Target | Description | +| --------------------- | ------- | -------------------------------------------------------- | ------------------------------------- | +| `-Dcoverage` | OFF | `clio_tests-ccov` | Enables code coverage generation | +| `-Dtests` | OFF | `clio_tests` | Enables unit tests | +| `-Dintegration_tests` | OFF | `clio_integration_tests` | Enables integration tests | +| `-Dbenchmark` | OFF | `clio_benchmark` | Enables benchmark executable | +| `-Ddocs` | OFF | `docs` | Enables API documentation generation | +| `-Dlint` | OFF | See [#clang-tidy](#using-clang-tidy-for-static-analysis) | Enables `clang-tidy` static analysis | +| `-Dsan` | N/A | N/A | Enables Sanitizer (asan, tsan, ubsan) | +| `-Dpackage` | OFF | N/A | Creates a Debian package | + ### Generating API docs for Clio The API documentation for Clio is generated by [Doxygen](https://www.doxygen.nl/index.html). If you want to generate the API documentation when building Clio, make sure to install Doxygen 1.12.0 on your system. -To generate the API docs: +To generate the API docs, use the CMake option `-Ddocs=ON` as described above and build the `docs` target; a short sketch of the full sequence follows below. To view the generated files, go to `build/docs/html`.
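+For example, here is a minimal sketch of the full sequence, assuming the `build` directory and the Conan-generated toolchain file from the steps above: + +```sh +cd build +cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Ddocs=ON .. +cmake --build . --parallel 8 --target docs +```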
+Open the `index.html` file in your browser to see the documentation pages. - ```sh - mkdir build && cd build - conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:docs=True' - ``` - -2. Once that has completed successfully, run the `cmake` command and add the `--target docs` option: - - ```sh - cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. - cmake --build . --parallel 8 --target docs - ``` - -3. Go to `build/docs/html` to view the generated files. - - Open the `index.html` file in your browser to see the documentation pages. - - ![API index page](./img/doxygen-docs-output.png "API index page") +![API index page](./img/doxygen-docs-output.png "API index page") ## Building Clio with Docker It is also possible to build Clio using [Docker](https://www.docker.com/) if you don't want to install all the dependencies on your machine. ```sh -docker run -it ghcr.io/xrplf/clio-ci:0e8896ad064a5290c4805318b549df16403ca2d7 +docker run -it ghcr.io/xrplf/clio-ci:384e79cd32f5f6c0ab9be3a1122ead41c5a7e67d git clone https://github.com/XRPLF/clio -mkdir build && cd build -conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -cmake --build . --parallel 8 # or without the number if you feel extra adventurous +cd clio ``` +Follow the same steps in the [Building Clio](#building-clio) section. You can use `--profile:all gcc` or `--profile:all clang` with the `conan install` command to choose the desired compiler. + ## Developing against `rippled` in standalone mode If you wish to develop against a `rippled` instance running in standalone mode there are a few quirks of both Clio and `rippled` that you need to keep in mind. You must: @@ -229,10 +250,10 @@ Sometimes, during development, you need to build against a custom version of `li ## Using `clang-tidy` for static analysis Clang-tidy can be run by CMake when building the project. -To achieve this, you just need to provide the option `-o '&:lint=True'` for the `conan install` command: +To achieve this, you just need to provide the option `-Dlint=ON` when generating CMake files: ```sh -conan install .. --output-folder . --build missing --settings build_type=Release -o '&:tests=True' -o '&:lint=True' --profile:all clang +cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dlint=ON .. ``` By default CMake will try to find `clang-tidy` automatically in your system. diff --git a/src/data/BackendInterface.cpp b/src/data/BackendInterface.cpp index c6ffbb0b..0542e0d8 100644 --- a/src/data/BackendInterface.cpp +++ b/src/data/BackendInterface.cpp @@ -43,11 +43,6 @@ #include #include -// local to compilation unit loggers -namespace { -util::Logger gLog{"Backend"}; -} // namespace - /** * @brief This namespace implements the data access layer and related components. * @@ -58,10 +53,10 @@ namespace data { bool BackendInterface::finishWrites(std::uint32_t const ledgerSequence) { - LOG(gLog.debug()) << "Want finish writes for " << ledgerSequence; + LOG(log_.debug()) << "Want finish writes for " << ledgerSequence; auto commitRes = doFinishWrites(); if (commitRes) { - LOG(gLog.debug()) << "Successfully committed. Updating range now to " << ledgerSequence; + LOG(log_.debug()) << "Successfully committed. 
Updating range now to " << ledgerSequence; updateRange(ledgerSequence); } return commitRes; @@ -89,15 +84,15 @@ BackendInterface::fetchLedgerObject( { auto obj = cache_.get().get(key, sequence); if (obj) { - LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key); + LOG(log_.trace()) << "Cache hit - " << ripple::strHex(key); return obj; } auto dbObj = doFetchLedgerObject(key, sequence, yield); if (!dbObj) { - LOG(gLog.trace()) << "Missed cache and missed in db"; + LOG(log_.trace()) << "Missed cache and missed in db"; } else { - LOG(gLog.trace()) << "Missed cache but found in db"; + LOG(log_.trace()) << "Missed cache but found in db"; } return dbObj; } @@ -111,7 +106,7 @@ BackendInterface::fetchLedgerObjectSeq( { auto seq = doFetchLedgerObjectSeq(key, sequence, yield); if (!seq) - LOG(gLog.trace()) << "Missed in db"; + LOG(log_.trace()) << "Missed in db"; return seq; } @@ -133,7 +128,7 @@ BackendInterface::fetchLedgerObjects( misses.push_back(keys[i]); } } - LOG(gLog.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size(); + LOG(log_.trace()) << "Cache hits = " << keys.size() - misses.size() << " - cache misses = " << misses.size(); if (!misses.empty()) { auto objs = doFetchLedgerObjects(misses, sequence, yield); @@ -158,9 +153,9 @@ BackendInterface::fetchSuccessorKey( { auto succ = cache_.get().getSuccessor(key, ledgerSequence); if (succ) { - LOG(gLog.trace()) << "Cache hit - " << ripple::strHex(key); + LOG(log_.trace()) << "Cache hit - " << ripple::strHex(key); } else { - LOG(gLog.trace()) << "Cache miss - " << ripple::strHex(key); + LOG(log_.trace()) << "Cache miss - " << ripple::strHex(key); } return succ ? succ->key : doFetchSuccessorKey(key, ledgerSequence, yield); } @@ -210,7 +205,7 @@ BackendInterface::fetchBookOffers( numSucc++; succMillis += getMillis(mid2 - mid1); if (!offerDir || offerDir->key >= bookEnd) { - LOG(gLog.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking"; + LOG(log_.trace()) << "offerDir.has_value() " << offerDir.has_value() << " breaking"; break; } uTipIndex = offerDir->key; @@ -223,7 +218,7 @@ BackendInterface::fetchBookOffers( keys.insert(keys.end(), indexes.begin(), indexes.end()); auto next = sle.getFieldU64(ripple::sfIndexNext); if (next == 0u) { - LOG(gLog.trace()) << "Next is empty. breaking"; + LOG(log_.trace()) << "Next is empty. breaking"; break; } auto nextKey = ripple::keylet::page(uTipIndex, next); @@ -238,13 +233,13 @@ BackendInterface::fetchBookOffers( auto mid = std::chrono::system_clock::now(); auto objs = fetchLedgerObjects(keys, ledgerSequence, yield); for (size_t i = 0; i < keys.size() && i < limit; ++i) { - LOG(gLog.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i]) + LOG(log_.trace()) << "Key = " << ripple::strHex(keys[i]) << " blob = " << ripple::strHex(objs[i]) << " ledgerSequence = " << ledgerSequence; ASSERT(!objs[i].empty(), "Ledger object can't be empty"); page.offers.push_back({keys[i], objs[i]}); } auto end = std::chrono::system_clock::now(); - LOG(gLog.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took " + LOG(log_.debug()) << "Fetching " << std::to_string(keys.size()) << " offers took " << std::to_string(getMillis(mid - begin)) << " milliseconds. Fetching next dir took " << std::to_string(succMillis) << " milliseconds. 
Fetched next dir " << std::to_string(numSucc) << " times" @@ -341,13 +336,13 @@ BackendInterface::fetchLedgerPage( if (!objects[i].empty()) { page.objects.push_back({keys[i], std::move(objects[i])}); } else if (!outOfOrder) { - LOG(gLog.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i]) + LOG(log_.error()) << "Deleted or non-existent object in successor table. key = " << ripple::strHex(keys[i]) << " - seq = " << ledgerSequence; std::stringstream msg; for (size_t j = 0; j < objects.size(); ++j) { msg << " - " << ripple::strHex(keys[j]); } - LOG(gLog.error()) << msg.str(); + LOG(log_.error()) << msg.str(); if (corruptionDetector_.has_value()) corruptionDetector_->onCorruptionDetected(); @@ -368,7 +363,7 @@ BackendInterface::fetchFees(std::uint32_t const seq, boost::asio::yield_context auto bytes = fetchLedgerObject(key, seq, yield); if (!bytes) { - LOG(gLog.error()) << "Could not find fees"; + LOG(log_.error()) << "Could not find fees"; return {}; } diff --git a/src/data/BackendInterface.hpp b/src/data/BackendInterface.hpp index 047e8c80..898552f9 100644 --- a/src/data/BackendInterface.hpp +++ b/src/data/BackendInterface.hpp @@ -138,6 +138,7 @@ synchronousAndRetryOnTimeout(FnType&& func) */ class BackendInterface { protected: + util::Logger log_{"Backend"}; mutable std::shared_mutex rngMtx_; std::optional range_; std::reference_wrapper cache_; diff --git a/src/etlng/ETLService.cpp b/src/etlng/ETLService.cpp index fa337cbe..595da9d1 100644 --- a/src/etlng/ETLService.cpp +++ b/src/etlng/ETLService.cpp @@ -305,6 +305,8 @@ ETLService::startLoading(uint32_t seq) { ASSERT(not state_->isStrictReadonly, "This should only happen on writer nodes"); taskMan_ = taskManagerProvider_->make(ctx_, *monitor_, seq, finishSequence_); + + // FIXME: this legacy name "extractor_threads" is no longer accurate (we have coroutines now) taskMan_->run(config_.get().get("extractor_threads")); } diff --git a/src/etlng/impl/TaskManager.cpp b/src/etlng/impl/TaskManager.cpp index b02a3ef5..1fe310ec 100644 --- a/src/etlng/impl/TaskManager.cpp +++ b/src/etlng/impl/TaskManager.cpp @@ -26,6 +26,7 @@ #include "etlng/SchedulerInterface.hpp" #include "etlng/impl/Monitor.hpp" #include "etlng/impl/TaskQueue.hpp" +#include "util/Assert.hpp" #include "util/Constants.hpp" #include "util/LedgerUtils.hpp" #include "util/Profiler.hpp" @@ -70,11 +71,10 @@ TaskManager::~TaskManager() void TaskManager::run(std::size_t numExtractors) { - LOG(log_.debug()) << "Starting task manager with " << numExtractors << " extractors...\n"; + ASSERT(not running_, "TaskManager can only be started once"); + running_ = true; - stop(); - extractors_.clear(); - loaders_.clear(); + LOG(log_.debug()) << "Starting task manager with " << numExtractors << " extractors...\n"; extractors_.reserve(numExtractors); for ([[maybe_unused]] auto _ : std::views::iota(0uz, numExtractors)) @@ -157,7 +157,8 @@ TaskManager::spawnLoader(TaskQueue& queue) monitor_.get().notifySequenceLoaded(data->seq); } else { // TODO (https://github.com/XRPLF/clio/issues/1852) this is probably better done with a timeout (on - // coroutine) so that the thread itself is not blocked + // coroutine) so that the thread itself is not blocked. 
for now this implies that the context + // (io_threads) needs at least 2 threads queue.awaitTask(); } } @@ -178,6 +179,8 @@ TaskManager::wait() void TaskManager::stop() { + ASSERT(running_, "TaskManager is not running"); + for (auto& extractor : extractors_) extractor.abort(); for (auto& loader : loaders_) diff --git a/src/etlng/impl/TaskManager.hpp b/src/etlng/impl/TaskManager.hpp index 188ceef1..2b205154 100644 --- a/src/etlng/impl/TaskManager.hpp +++ b/src/etlng/impl/TaskManager.hpp @@ -56,6 +56,7 @@ class TaskManager : public TaskManagerInterface { std::vector> extractors_; std::vector> loaders_; + std::atomic_bool running_ = false; util::Logger log_{"ETL"}; public: diff --git a/src/feed/impl/LedgerFeed.hpp b/src/feed/impl/LedgerFeed.hpp index 9e356077..f8e93cd0 100644 --- a/src/feed/impl/LedgerFeed.hpp +++ b/src/feed/impl/LedgerFeed.hpp @@ -41,7 +41,8 @@ namespace feed::impl { * @brief Feed that publishes the ledger info. * Example : {'type': 'ledgerClosed', 'ledger_index': 2647935, 'ledger_hash': * '5D022718CD782A82EE10D2147FD90B5F42F26A7E937C870B4FE3CF1086C916AE', 'ledger_time': 756395681, 'fee_base': 10, - * 'reserve_base': 10000000, 'reserve_inc': 2000000, 'validated_ledgers': '2619127-2647935', 'txn_count': 0} + * 'reserve_base': 10000000, 'reserve_inc': 2000000, 'validated_ledgers': '2619127-2647935', 'txn_count': 0, + * 'network_id': 1} */ class LedgerFeed : public SingleFeedBase { public: @@ -57,7 +58,8 @@ public: * @brief Subscribe the ledger feed. * @param yield The coroutine yield. * @param backend The backend. - * @param subscriber + * @param subscriber The subscriber. + * @param networkID The network ID. * @return The information of the latest ledger. */ boost::json::object @@ -72,6 +74,7 @@ public: * @param fees The fees. * @param ledgerRange The ledger range. * @param txnCount The transaction count. + * @param networkID The network ID. */ void pub(ripple::LedgerHeader const& lgrInfo, diff --git a/src/feed/impl/TransactionFeed.cpp b/src/feed/impl/TransactionFeed.cpp index 503721b5..79f69556 100644 --- a/src/feed/impl/TransactionFeed.cpp +++ b/src/feed/impl/TransactionFeed.cpp @@ -209,7 +209,6 @@ TransactionFeed::pub( auto& txnPubobj = pubObj[txKey].as_object(); rpc::insertDeliverMaxAlias(txnPubobj, version); - rpc::insertMPTIssuanceID(txnPubobj, meta); Json::Value nftJson; ripple::RPC::insertNFTSyntheticInJson(nftJson, tx, *meta); diff --git a/src/rpc/RPCHelpers.cpp b/src/rpc/RPCHelpers.cpp index 2f7eb09c..958119fe 100644 --- a/src/rpc/RPCHelpers.cpp +++ b/src/rpc/RPCHelpers.cpp @@ -33,7 +33,6 @@ #include "web/Context.hpp" #include -#include #include #include #include @@ -102,11 +101,6 @@ #include #include -// local to compilation unit loggers -namespace { -util::Logger gLog{"RPC"}; -} // namespace - namespace rpc { std::optional @@ -209,6 +203,8 @@ accountFromStringStrict(std::string const& account) std::pair, std::shared_ptr> deserializeTxPlusMeta(data::TransactionAndMetadata const& blobs) { + static util::Logger const log{"RPC"}; // NOLINT(readability-identifier-naming) + try { std::pair, std::shared_ptr> result; { @@ -225,9 +221,9 @@ deserializeTxPlusMeta(data::TransactionAndMetadata const& blobs) std::stringstream meta; std::ranges::copy(blobs.transaction, std::ostream_iterator(txn)); std::ranges::copy(blobs.metadata, std::ostream_iterator(meta)); - LOG(gLog.error()) << "Failed to deserialize transaction. 
txn = " << txn.str() << " - meta = " << meta.str() - << " txn length = " << std::to_string(blobs.transaction.size()) - << " meta length = " << std::to_string(blobs.metadata.size()); + LOG(log.error()) << "Failed to deserialize transaction. txn = " << txn.str() << " - meta = " << meta.str() + << " txn length = " << std::to_string(blobs.transaction.size()) + << " meta length = " << std::to_string(blobs.metadata.size()); throw e; } } @@ -263,7 +259,6 @@ toExpandedJson( auto metaJson = toJson(*meta); insertDeliveredAmount(metaJson, txn, meta, blobs.date); insertDeliverMaxAlias(txnJson, apiVersion); - insertMPTIssuanceID(txnJson, meta); if (nftEnabled == NFTokenjson::ENABLE) { Json::Value nftJson; @@ -322,64 +317,6 @@ insertDeliveredAmount( return false; } -/** - * @brief Get the delivered amount - * - * @param meta The metadata - * @return The mpt_issuance_id or std::nullopt if not available - */ -static std::optional -getMPTIssuanceID(std::shared_ptr const& meta) -{ - ripple::TxMeta const& transactionMeta = *meta; - - for (ripple::STObject const& node : transactionMeta.getNodes()) { - if (node.getFieldU16(ripple::sfLedgerEntryType) != ripple::ltMPTOKEN_ISSUANCE || - node.getFName() != ripple::sfCreatedNode) - continue; - - auto const& mptNode = node.peekAtField(ripple::sfNewFields).downcast(); - return ripple::makeMptID(mptNode[ripple::sfSequence], mptNode[ripple::sfIssuer]); - } - - return {}; -} - -/** - * @brief Check if transaction has a new MPToken created - * - * @param txnJson The transaction Json - * @param meta The metadata - * @return true if the transaction can have a mpt_issuance_id - */ -static bool -canHaveMPTIssuanceID(boost::json::object const& txnJson, std::shared_ptr const& meta) -{ - if (txnJson.at(JS(TransactionType)).is_string() and - not boost::iequals(txnJson.at(JS(TransactionType)).as_string(), JS(MPTokenIssuanceCreate))) - return false; - - if (meta->getResultTER() != ripple::tesSUCCESS) - return false; - - return true; -} - -bool -insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr const& meta) -{ - if (!canHaveMPTIssuanceID(txnJson, meta)) - return false; - - if (auto const id = getMPTIssuanceID(meta)) { - txnJson[JS(mpt_issuance_id)] = ripple::to_string(*id); - return true; - } - - assert(false); - return false; -} - void insertDeliverMaxAlias(boost::json::object& txJson, std::uint32_t const apiVersion) { @@ -804,7 +741,9 @@ traverseOwnedNodes( } auto end = std::chrono::system_clock::now(); - LOG(gLog.debug()) << fmt::format( + static util::Logger const log{"RPC"}; // NOLINT(readability-identifier-naming) + + LOG(log.debug()) << fmt::format( "Time loading owned directories: {} milliseconds, entries size: {}", std::chrono::duration_cast(end - start).count(), keys.size() @@ -812,7 +751,7 @@ traverseOwnedNodes( auto [objects, timeDiff] = util::timed([&]() { return backend.fetchLedgerObjects(keys, sequence, yield); }); - LOG(gLog.debug()) << "Time loading owned entries: " << timeDiff << " milliseconds"; + LOG(log.debug()) << "Time loading owned entries: " << timeDiff << " milliseconds"; for (auto i = 0u; i < objects.size(); ++i) { ripple::SerialIter it{objects[i].data(), objects[i].size()}; @@ -1300,7 +1239,8 @@ postProcessOrderBook( jsonOffers.push_back(offerJson); } catch (std::exception const& e) { - LOG(gLog.error()) << "caught exception: " << e.what(); + util::Logger const log{"RPC"}; + LOG(log.error()) << "caught exception: " << e.what(); } } return jsonOffers; diff --git a/src/rpc/RPCHelpers.hpp b/src/rpc/RPCHelpers.hpp index 8f298b86..d7395828 
100644 --- a/src/rpc/RPCHelpers.hpp +++ b/src/rpc/RPCHelpers.hpp @@ -199,16 +199,6 @@ insertDeliveredAmount( uint32_t date ); -/** - * @brief Add "mpt_issuance_id" into MPTokenIssuanceCreate transaction json. - * - * @param txnJson The transaction Json object - * @param meta The metadata object - * @return true if the "mpt_issuance_id" is added to the metadata json object - */ -bool -insertMPTIssuanceID(boost::json::object& txnJson, std::shared_ptr const& meta); - /** * @brief Convert STBase object to JSON * diff --git a/src/util/Assert.cpp b/src/util/Assert.cpp index 1acb6988..87165d60 100644 --- a/src/util/Assert.cpp +++ b/src/util/Assert.cpp @@ -54,7 +54,7 @@ OnAssert::resetAction() void OnAssert::defaultAction(std::string_view message) { - if (LogService::enabled()) { + if (LogServiceState::initialized()) { LOG(LogService::fatal()) << message; } else { std::cerr << message; diff --git a/src/util/log/Logger.cpp b/src/util/log/Logger.cpp index 5c0da1df..576cf3bf 100644 --- a/src/util/log/Logger.cpp +++ b/src/util/log/Logger.cpp @@ -41,12 +41,12 @@ #include #include -#include #include #include #include #include #include +#include #include #include #include @@ -56,7 +56,10 @@ namespace util { -LogService::Data LogService::data{}; +bool LogServiceState::isAsync_{true}; +Severity LogServiceState::defaultSeverity_{Severity::NFO}; +std::vector LogServiceState::sinks_{}; +bool LogServiceState::initialized_{false}; namespace { @@ -238,23 +241,71 @@ getMinSeverity(config::ClioConfigDefinition const& config, Severity defaultSever return minSeverity; } -std::shared_ptr -LogService::registerLogger(std::string const& channel, Severity severity) +void +LogServiceState::init(bool isAsync, Severity defaultSeverity, std::vector const& sinks) { - std::shared_ptr logger; - if (data.isAsync) { - logger = std::make_shared( - channel, - data.allSinks.begin(), - data.allSinks.end(), - spdlog::thread_pool(), - spdlog::async_overflow_policy::block - ); - } else { - logger = std::make_shared(channel, data.allSinks.begin(), data.allSinks.end()); + if (initialized_) { + throw std::logic_error("LogServiceState is already initialized"); } - logger->set_level(toSpdlogLevel(severity)); + isAsync_ = isAsync; + defaultSeverity_ = defaultSeverity; + sinks_ = sinks; + initialized_ = true; + + spdlog::apply_all([](std::shared_ptr logger) { + logger->set_level(toSpdlogLevel(defaultSeverity_)); + }); + + if (isAsync) { + static constexpr size_t kQUEUE_SIZE = 8192; + static constexpr size_t kTHREAD_COUNT = 1; + spdlog::init_thread_pool(kQUEUE_SIZE, kTHREAD_COUNT); + } +} + +bool +LogServiceState::initialized() +{ + return initialized_; +} + +void +LogServiceState::reset() +{ + if (not initialized()) { + throw std::logic_error("LogService is not initialized"); + } + isAsync_ = true; + defaultSeverity_ = Severity::NFO; + sinks_.clear(); + initialized_ = false; +} + +std::shared_ptr +LogServiceState::registerLogger(std::string const& channel, std::optional severity) +{ + if (not initialized_) { + throw std::logic_error("LogService is not initialized"); + } + + std::shared_ptr existingLogger = spdlog::get(channel); + if (existingLogger != nullptr) { + if (severity.has_value()) + existingLogger->set_level(toSpdlogLevel(*severity)); + return existingLogger; + } + + std::shared_ptr logger; + if (isAsync_) { + logger = std::make_shared( + channel, sinks_.begin(), sinks_.end(), spdlog::thread_pool(), spdlog::async_overflow_policy::block + ); + } else { + logger = std::make_shared(channel, sinks_.begin(), sinks_.end()); + } + + 
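+ // Fall back to the service-wide default severity when the caller did not provide one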
logger->set_level(toSpdlogLevel(severity.value_or(defaultSeverity_))); logger->flush_on(spdlog::level::err); spdlog::register_logger(logger); @@ -262,22 +313,12 @@ LogService::registerLogger(std::string const& channel, Severity severity) return logger; } -std::expected -LogService::init(config::ClioConfigDefinition const& config) +std::expected, std::string> +LogService::getSinks(config::ClioConfigDefinition const& config) { - // Drop existing loggers - spdlog::drop_all(); - - data.isAsync = config.get("log.is_async"); - data.defaultSeverity = getSeverityLevel(config.get("log.level")); - std::string const format = config.get("log.format"); - if (data.isAsync) { - spdlog::init_thread_pool(8192, 1); - } - - data.allSinks = createConsoleSinks(config.get("log.enable_console"), format); + std::vector allSinks = createConsoleSinks(config.get("log.enable_console"), format); if (auto const logDir = config.maybeValue("log.directory"); logDir.has_value()) { std::filesystem::path const dirPath{logDir.value()}; @@ -294,11 +335,27 @@ LogService::init(config::ClioConfigDefinition const& config) .rotationSizeMB = config.get("log.rotation_size"), .dirMaxFiles = config.get("log.directory_max_files"), }; - data.allSinks.push_back(createFileSink(params, format)); + allSinks.push_back(createFileSink(params, format)); + } + return allSinks; +} + +std::expected +LogService::init(config::ClioConfigDefinition const& config) +{ + auto const sinksMaybe = getSinks(config); + if (!sinksMaybe.has_value()) { + return std::unexpected{sinksMaybe.error()}; } + LogServiceState::init( + config.get("log.is_async"), + getSeverityLevel(config.get("log.level")), + std::move(sinksMaybe).value() + ); + // get min severity per channel, can be overridden using the `log.channels` array - auto const maybeMinSeverity = getMinSeverity(config, data.defaultSeverity); + auto const maybeMinSeverity = getMinSeverity(config, defaultSeverity_); if (!maybeMinSeverity) { return std::unexpected{maybeMinSeverity.error()}; } @@ -307,20 +364,23 @@ LogService::init(config::ClioConfigDefinition const& config) // Create loggers for each channel for (auto const& channel : Logger::kCHANNELS) { auto const it = minSeverity.find(channel); - auto const severity = (it != minSeverity.end()) ? it->second : data.defaultSeverity; + auto const severity = (it != minSeverity.end()) ? 
it->second : defaultSeverity_; registerLogger(channel, severity); } spdlog::set_default_logger(spdlog::get("General")); - LOG(LogService::info()) << "Default log level = " << toString(data.defaultSeverity); + LOG(LogService::info()) << "Default log level = " << toString(defaultSeverity_); return {}; } void LogService::shutdown() { - spdlog::shutdown(); + if (initialized_ && isAsync_) { + // We run in async mode in production, so we need to make sure all logs are flushed before shutting down + spdlog::shutdown(); + } } Logger::Pump @@ -359,17 +419,15 @@ LogService::fatal(SourceLocationType const& loc) return Logger(spdlog::default_logger()).fatal(loc); } -bool -LogService::enabled() +void +LogServiceState::replaceSinks(std::vector> const& sinks) { - return spdlog::get_level() != spdlog::level::off; + sinks_ = sinks; + spdlog::apply_all([](std::shared_ptr logger) { logger->sinks() = sinks_; }); } -Logger::Logger(std::string channel) : logger_(spdlog::get(channel)) +Logger::Logger(std::string channel) : logger_(LogServiceState::registerLogger(channel)) { - if (!logger_) { - logger_ = LogService::registerLogger(channel); - } } Logger::Pump::Pump(std::shared_ptr logger, Severity sev, SourceLocationType const& loc) diff --git a/src/util/log/Logger.hpp b/src/util/log/Logger.hpp index 49209ff0..7ba17b71 100644 --- a/src/util/log/Logger.hpp +++ b/src/util/log/Logger.hpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -43,9 +44,15 @@ class sink; // NOLINT(readability-identifier-naming) } // namespace spdlog struct BenchmarkLoggingInitializer; +class LoggerFixture; +struct LogServiceInitTests; namespace util { +namespace impl { +class OnAssert; +} // namespace impl + namespace config { class ClioConfigDefinition; } // namespace config @@ -228,27 +235,78 @@ private: Logger(std::shared_ptr logger); }; +/** + * @brief Base state management class for the logging service. + * + * This class manages the global state and core functionality for the logging system, + * including initialization, sink management, and logger registration. + */ +class LogServiceState { +protected: + friend struct ::LogServiceInitTests; + friend class ::LoggerFixture; + friend class Logger; + friend class ::util::impl::OnAssert; + + /** + * @brief Initialize the logging core with specified parameters. + * + * @param isAsync Whether logging should be asynchronous + * @param defaultSeverity The default severity level for new loggers + * @param sinks Vector of spdlog sinks to use for output + */ + static void + init(bool isAsync, Severity defaultSeverity, std::vector> const& sinks); + + /** + * @brief Whether the LogService is initialized or not + * + * @return true if the LogService is initialized + */ + [[nodiscard]] static bool + initialized(); + + /** + * @brief Reset the logging service to uninitialized state. + */ + static void + reset(); + + /** + * @brief Replace the current sinks with a new set of sinks. + * + * @param sinks Vector of new spdlog sinks to replace the current ones + */ + static void + replaceSinks(std::vector> const& sinks); + + /** + * @brief Register a new logger for the specified channel. + * + * Creates and registers a new spdlog logger instance for the given channel + * with the specified or default severity level. 
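+ * If a logger for the channel is already registered, it is returned as-is, with its severity updated when one is provided.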
+ * + * @param channel The name of the logging channel + * @param severity Optional severity level override; uses default if not specified + * @return Shared pointer to the registered spdlog logger + */ + static std::shared_ptr + registerLogger(std::string const& channel, std::optional severity = std::nullopt); + +protected: + static bool isAsync_; // NOLINT(readability-identifier-naming) + static Severity defaultSeverity_; // NOLINT(readability-identifier-naming) + static std::vector> sinks_; // NOLINT(readability-identifier-naming) + static bool initialized_; // NOLINT(readability-identifier-naming) +}; + /** * @brief A global logging service. * * Used to initialize and setup the logging core as well as a globally available * entrypoint for logging into the `General` channel as well as raising alerts. */ -class LogService { - struct Data { - bool isAsync; - Severity defaultSeverity; - std::vector> allSinks; - }; - - friend class Logger; - -private: - static Data data; - - static std::shared_ptr - registerLogger(std::string const& channel, Severity severity = data.defaultSeverity); - +class LogService : public LogServiceState { public: LogService() = delete; @@ -321,15 +379,16 @@ public: [[nodiscard]] static Logger::Pump fatal(SourceLocationType const& loc = CURRENT_SRC_LOCATION); - /** - * @brief Whether the LogService is enabled or not - * - * @return true if the LogService is enabled, false otherwise - */ - [[nodiscard]] static bool - enabled(); - private: + /** + * @brief Parses the sinks from a @ref config::ClioConfigDefinition + * + * @param config The configuration to parse sinks from + * @return A vector of sinks on success, error message on failure + */ + [[nodiscard]] static std::expected>, std::string> + getSinks(config::ClioConfigDefinition const& config); + struct FileLoggingParams { std::string logDir; diff --git a/src/web/ng/Connection.hpp b/src/web/ng/Connection.hpp index d9696dc6..309bb7d2 100644 --- a/src/web/ng/Connection.hpp +++ b/src/web/ng/Connection.hpp @@ -143,9 +143,9 @@ public: * * @param response The response to send. * @param yield The yield context. - * @return An error if the operation failed or nullopt if it succeeded. + * @return An error if the operation fails, otherwise nothing. 
*/ - virtual std::optional + virtual std::expected send(Response response, boost::asio::yield_context yield) = 0; /** diff --git a/src/web/ng/Server.cpp b/src/web/ng/Server.cpp index 53abd855..5726f1a8 100644 --- a/src/web/ng/Server.cpp +++ b/src/web/ng/Server.cpp @@ -147,9 +147,9 @@ makeConnection( tagDecoratorFactory ); sslConnection->setTimeout(std::chrono::seconds{10}); - auto const maybeError = sslConnection->sslHandshake(yield); - if (maybeError.has_value()) - return std::unexpected{fmt::format("SSL handshake error: {}", maybeError->message())}; + auto const expectedSuccess = sslConnection->sslHandshake(yield); + if (not expectedSuccess.has_value()) + return std::unexpected{fmt::format("SSL handshake error: {}", expectedSuccess.error().message())}; connection = std::move(sslConnection); } else { diff --git a/src/web/ng/SubscriptionContext.cpp b/src/web/ng/SubscriptionContext.cpp index 5cf07b6e..f3e76feb 100644 --- a/src/web/ng/SubscriptionContext.cpp +++ b/src/web/ng/SubscriptionContext.cpp @@ -70,8 +70,8 @@ SubscriptionContext::send(std::shared_ptr message) } tasksGroup_.spawn(yield_, [this, message = std::move(message)](boost::asio::yield_context innerYield) mutable { - auto const maybeError = connection_.get().sendShared(std::move(message), innerYield); - if (maybeError.has_value() and errorHandler_(*maybeError, connection_)) { + auto const expectedSuccess = connection_.get().sendShared(std::move(message), innerYield); + if (not expectedSuccess.has_value() and errorHandler_(expectedSuccess.error(), connection_)) { connection_.get().close(innerYield); gotError_ = true; } diff --git a/src/web/ng/impl/ConnectionHandler.cpp b/src/web/ng/impl/ConnectionHandler.cpp index c8cdc6e1..d188f963 100644 --- a/src/web/ng/impl/ConnectionHandler.cpp +++ b/src/web/ng/impl/ConnectionHandler.cpp @@ -365,9 +365,9 @@ ConnectionHandler::processRequest( auto response = handleRequest(connection, subscriptionContext, request, yield); LOG(log_.trace()) << connection.tag() << "Sending response: " << response.message(); - auto const maybeError = connection.send(std::move(response), yield); - if (maybeError.has_value()) { - return handleError(maybeError.value(), connection); + auto const expectedSuccess = connection.send(std::move(response), yield); + if (not expectedSuccess.has_value()) { + return handleError(expectedSuccess.error(), connection); } return std::nullopt; } diff --git a/src/web/ng/impl/HttpConnection.hpp b/src/web/ng/impl/HttpConnection.hpp index f64aa868..714284b2 100644 --- a/src/web/ng/impl/HttpConnection.hpp +++ b/src/web/ng/impl/HttpConnection.hpp @@ -62,7 +62,7 @@ public: virtual std::expected upgrade(util::TagDecoratorFactory const& tagDecoratorFactory, boost::asio::yield_context yield) = 0; - virtual std::optional + virtual std::expected sendRaw( boost::beast::http::response response, boost::asio::yield_context yield @@ -123,7 +123,7 @@ public: HttpConnection& operator=(HttpConnection const& other) = delete; - std::optional + std::expected sslHandshake(boost::asio::yield_context yield) requires IsSslTcpStream { @@ -132,11 +132,11 @@ public: auto const bytesUsed = stream_.async_handshake(boost::asio::ssl::stream_base::server, buffer_.cdata(), yield[error]); if (error) - return error; + return std::unexpected{error}; buffer_.consume(bytesUsed); - return std::nullopt; + return {}; } bool @@ -145,7 +145,7 @@ public: return false; } - std::optional + std::expected sendRaw( boost::beast::http::response response, boost::asio::yield_context yield @@ -160,7 +160,7 @@ public: timeout_ = 
newTimeout; } - std::optional + std::expected send(Response response, boost::asio::yield_context yield) override { auto httpResponse = std::move(response).intoHttpResponse(); diff --git a/src/web/ng/impl/SendingQueue.hpp b/src/web/ng/impl/SendingQueue.hpp index 4bd1a14d..2a1328c1 100644 --- a/src/web/ng/impl/SendingQueue.hpp +++ b/src/web/ng/impl/SendingQueue.hpp @@ -47,15 +47,15 @@ public: { } - std::optional + std::expected send(T message, boost::asio::yield_context yield) { if (error_) - return error_; + return std::unexpected{error_}; queue_.push(std::move(message)); if (isSending_) - return std::nullopt; + return {}; isSending_ = true; while (not queue_.empty() and not error_) { @@ -65,8 +65,8 @@ public: } isSending_ = false; if (error_) - return error_; - return std::nullopt; + return std::unexpected{error_}; + return {}; } }; diff --git a/src/web/ng/impl/WsConnection.hpp b/src/web/ng/impl/WsConnection.hpp index bf1263e2..ad28b0d0 100644 --- a/src/web/ng/impl/WsConnection.hpp +++ b/src/web/ng/impl/WsConnection.hpp @@ -59,7 +59,7 @@ class WsConnectionBase : public Connection { public: using Connection::Connection; - virtual std::optional + virtual std::expected sendShared(std::shared_ptr message, boost::asio::yield_context yield) = 0; }; @@ -108,14 +108,14 @@ public: WsConnection& operator=(WsConnection const&) = delete; - std::optional + std::expected performHandshake(boost::asio::yield_context yield) { Error error; stream_.async_accept(initialRequest_, yield[error]); if (error) - return error; - return std::nullopt; + return std::unexpected{error}; + return {}; } bool @@ -124,7 +124,7 @@ public: return true; } - std::optional + std::expected sendShared(std::shared_ptr message, boost::asio::yield_context yield) override { return sendingQueue_.send(std::move(message), yield); @@ -140,7 +140,7 @@ public: stream_.set_option(wsTimeout); } - std::optional + std::expected send(Response response, boost::asio::yield_context yield) override { return sendingQueue_.send(std::move(response), yield); @@ -206,9 +206,9 @@ makeWsConnection( auto connection = std::make_unique>( std::forward(stream), std::move(ip), std::move(buffer), std::move(request), tagDecoratorFactory ); - auto maybeError = connection->performHandshake(yield); - if (maybeError.has_value()) - return std::unexpected{maybeError.value()}; + auto const expectedSuccess = connection->performHandshake(yield); + if (not expectedSuccess.has_value()) + return std::unexpected{expectedSuccess.error()}; return connection; } diff --git a/tests/common/util/AsioContextTestFixture.hpp b/tests/common/util/AsioContextTestFixture.hpp index e98670a9..2c1fb536 100644 --- a/tests/common/util/AsioContextTestFixture.hpp +++ b/tests/common/util/AsioContextTestFixture.hpp @@ -40,7 +40,7 @@ * * This is meant to be used as a base for other fixtures. */ -struct AsyncAsioContextTest : virtual public NoLoggerFixture { +struct AsyncAsioContextTest : virtual public ::testing::Test { AsyncAsioContextTest() { work_.emplace(boost::asio::make_work_guard(ctx_)); // make sure ctx does not stop on its own @@ -79,7 +79,7 @@ private: * Use `run_for(duration)` etc. directly on `ctx`. * This is meant to be used as a base for other fixtures. 
*/ -struct SyncAsioContextTest : virtual public NoLoggerFixture { +struct SyncAsioContextTest : virtual public ::testing::Test { template void runCoroutine(F&& f, bool allowMockLeak = false) diff --git a/tests/common/util/LoggerBuffer.hpp b/tests/common/util/LoggerBuffer.hpp new file mode 100644 index 00000000..5e7f3e15 --- /dev/null +++ b/tests/common/util/LoggerBuffer.hpp @@ -0,0 +1,43 @@ +//------------------------------------------------------------------------------ +/* + This file is part of clio: https://github.com/XRPLF/clio + Copyright (c) 2025, the clio developers. + + Permission to use, copy, modify, and distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#pragma once +#include "util/StringBuffer.hpp" + +#include +#include + +class LoggerBuffer { +public: + std::string + getStrAndReset() + { + return buffer_.getStrAndReset(); + } + + std::ostream& + getStream() + { + return stream_; + } + +private: + StringBuffer buffer_; + std::ostream stream_ = std::ostream{&buffer_}; +}; diff --git a/tests/common/util/LoggerFixtures.cpp b/tests/common/util/LoggerFixtures.cpp index d0649be6..23a8710e 100644 --- a/tests/common/util/LoggerFixtures.cpp +++ b/tests/common/util/LoggerFixtures.cpp @@ -22,6 +22,7 @@ #include "util/log/Logger.hpp" #include +#include #include #include #include @@ -29,32 +30,40 @@ #include #include -LoggerFixture::LoggerFixture() +void +LoggerFixture::init() { - // Clear any existing loggers - spdlog::drop_all(); + util::LogServiceState::init(false, util::Severity::FTL, {}); - // Create ostream sink for testing - auto ostreamSink = std::make_shared(stream_); - ostreamSink->set_formatter(std::make_unique("%^%3!l:%n%$ - %v")); - - // Create loggers for each channel - std::ranges::for_each(util::Logger::kCHANNELS, [&ostreamSink](char const* channel) { - auto logger = std::make_shared(channel, ostreamSink); - logger->set_level(spdlog::level::trace); - spdlog::register_logger(logger); + std::ranges::for_each(util::Logger::kCHANNELS, [](char const* channel) { + util::LogService::registerLogger(channel); }); - spdlog::get("General")->set_level(spdlog::level::debug); - - auto traceLogger = std::make_shared("Trace", ostreamSink); - traceLogger->set_level(spdlog::level::trace); - spdlog::register_logger(traceLogger); - spdlog::set_default_logger(spdlog::get("General")); } -NoLoggerFixture::NoLoggerFixture() +void +LoggerFixture::resetTestingLoggers() { - spdlog::set_level(spdlog::level::off); + auto ostreamSink = std::make_shared(buffer_.getStream()); + ostreamSink->set_formatter(std::make_unique("%^%3!l:%n%$ - %v")); + ostreamSink->set_level(spdlog::level::trace); + util::LogServiceState::replaceSinks({ostreamSink}); + + spdlog::apply_all([](std::shared_ptr logger) { logger->set_level(spdlog::level::trace); }); + spdlog::get("General")->set_level(spdlog::level::debug); +} + 
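+// Each test constructs this fixture, which resets the global logging state, +// re-initializes it synchronously at trace level, and installs the test ostream sink.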
+LoggerFixture::LoggerFixture() +{ + util::LogServiceState::reset(); + util::LogServiceState::init(false, util::Severity::TRC, {}); + + resetTestingLoggers(); +} + +LoggerFixture::~LoggerFixture() +{ + util::LogServiceState::replaceSinks({}); + spdlog::apply_all([](std::shared_ptr logger) { logger->set_level(spdlog::level::critical); }); } diff --git a/tests/common/util/LoggerFixtures.hpp b/tests/common/util/LoggerFixtures.hpp index e221ad16..1d4633fa 100644 --- a/tests/common/util/LoggerFixtures.hpp +++ b/tests/common/util/LoggerFixtures.hpp @@ -19,37 +19,39 @@ #pragma once -#include "util/StringBuffer.hpp" +#include "util/LoggerBuffer.hpp" #include -#include #include /** * @brief A fixture for testing LogService and Logger. */ class LoggerFixture : virtual public ::testing::Test { - StringBuffer buffer_; - std::ostream stream_ = std::ostream{&buffer_}; +protected: + LoggerBuffer buffer_; public: - // Simulates the `util::LogService::init(config)` call LoggerFixture(); + ~LoggerFixture() override; + + /** + * @brief Sets up spdlog loggers for each channel. Should be called once before using any loggers. + * Simulates the `util::LogService::init(config)` call + */ + static void + init(); protected: + [[nodiscard]] std::string getLoggerString() { return buffer_.getStrAndReset(); } -}; -/** - * @brief Fixture with util::Logger support but completely disabled logging. - * - * This is meant to be used as a base for other fixtures. - */ -struct NoLoggerFixture : virtual LoggerFixture { - NoLoggerFixture(); +private: + void + resetTestingLoggers(); }; diff --git a/tests/common/util/MockBackendTestFixture.hpp b/tests/common/util/MockBackendTestFixture.hpp index 4e47e5c3..06c84e37 100644 --- a/tests/common/util/MockBackendTestFixture.hpp +++ b/tests/common/util/MockBackendTestFixture.hpp @@ -29,7 +29,7 @@ #include template