Compare commits


2 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Nik Bougalis | 82de944b30 | Set version to 0.50.3 | 2017-03-13 15:55:03 -07:00 |
| seelabs | fb31380abd | Enforce rippling constraints during payments | 2017-03-13 15:36:46 -07:00 |
4614 changed files with 1020189 additions and 506129 deletions

.clang-format

@@ -1,97 +0,0 @@
---
Language: Cpp
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
AlignOperands: false
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: false
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterReturnType: All
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: false
BinPackParameters: false
BraceWrapping:
  AfterClass: true
  AfterControlStatement: true
  AfterEnum: false
  AfterFunction: true
  AfterNamespace: false
  AfterObjCDeclaration: true
  AfterStruct: true
  AfterUnion: true
  BeforeCatch: true
  BeforeElse: true
  IndentBraces: false
BreakBeforeBinaryOperators: false
BreakBeforeBraces: Custom
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ Q_FOREACH, BOOST_FOREACH ]
IncludeBlocks: Regroup
IncludeCategories:
  - Regex: '^<(test)/'
    Priority: 0
  - Regex: '^<(xrpld)/'
    Priority: 1
  - Regex: '^<(xrpl)/'
    Priority: 2
  - Regex: '^<(boost)/'
    Priority: 3
  - Regex: '^.*/'
    Priority: 4
  - Regex: '^.*\.h'
    Priority: 5
  - Regex: '.*'
    Priority: 6
IncludeIsMainRegex: '$'
IndentCaseLabels: true
IndentFunctionDeclarationAfterType: false
IndentRequiresClause: true
IndentWidth: 4
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: false
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: false
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
ReflowComments: true
RequiresClausePosition: OwnLine
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
QualifierAlignment: Right

.codecov.yml

@@ -1,37 +0,0 @@
codecov:
  require_ci_to_pass: true

comment:
  behavior: default
  layout: reach,diff,flags,tree,reach
  show_carryforward_flags: false

coverage:
  range: "70..85"
  precision: 1
  round: nearest
  status:
    project:
      default:
        target: 75%
        threshold: 2%
    patch:
      default:
        target: auto
        threshold: 2%
    changes: false

github_checks:
  annotations: true

parsers:
  cobertura:
    partials_as_hits: true
    handle_missing_conditions: true

slack_app: false

ignore:
  - "src/test/"
  - "include/xrpl/beast/test/"
  - "include/xrpl/beast/unit_test/"

.git-blame-ignore-revs

@@ -1,13 +0,0 @@
# This feature requires Git >= 2.24
# To use it by default in git blame:
# git config blame.ignoreRevsFile .git-blame-ignore-revs
50760c693510894ca368e90369b0cc2dabfd07f3
e2384885f5f630c8f0ffe4bf21a169b433a16858
241b9ddde9e11beb7480600fd5ed90e1ef109b21
760f16f56835663d9286bd29294d074de26a7ba6
0eebe6a5f4246fced516d52b83ec4e7f47373edd
2189cc950c0cebb89e4e2fa3b2d8817205bf7cef
b9d007813378ad0ff45660dc07285b823c7e9855
fe9a5365b8a52d4acc42eb27369247e6f238a4f9
9a93577314e6a8d4b4a8368cc9d2b15a5d8303e8
552377c76f55b403a1c876df873a23d780fcc81c

.github/CODEOWNERS vendored (8 changes)

@@ -1,8 +0,0 @@
# Allow anyone to review any change by default.
*
# Require the rpc-reviewers team to review changes to the rpc code.
include/xrpl/protocol/ @xrplf/rpc-reviewers
src/libxrpl/protocol/ @xrplf/rpc-reviewers
src/xrpld/rpc/ @xrplf/rpc-reviewers
src/xrpld/app/misc/ @xrplf/rpc-reviewers

.github/ISSUE_TEMPLATE/bug_report.md

@@ -1,31 +0,0 @@
---
name: Bug Report
about: Create a report to help us improve rippled
title: "[Title with short description] (Version: [rippled version])"
labels: ''
assignees: ''
---
<!-- Please search existing issues to avoid creating duplicates.-->
## Issue Description
<!--Provide a summary for your issue/bug.-->
## Steps to Reproduce
<!--List in detail the exact steps to reproduce the unexpected behavior of the software.-->
## Expected Result
<!--Explain in detail what behavior you expected to happen.-->
## Actual Result
<!--Explain in detail what behavior actually happened.-->
## Environment
<!--Please describe your environment setup (such as Ubuntu 18.04 with Boost 1.70).-->
<!-- If you are using a formal release, please use the version returned by './rippled --version' as the version number-->
<!-- If you are working off of develop, please add the git hash via 'git rev-parse HEAD'-->
## Supporting Files
<!--If you have supporting files such as a log, feel free to post a link here using Github Gist.-->
<!--Consider adding configuration files with private information removed via Github Gist. -->

.github/ISSUE_TEMPLATE/config.yml

@@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
  - name: XRP Ledger Documentation
    url: https://xrpl.org/
    about: All things about XRPL
  - name: Security bug bounty program
    url: https://ripple.com/bug-bounty/
    about: Please report security-relevant bugs in our software here.

.github/ISSUE_TEMPLATE/feature_request.md

@@ -1,21 +0,0 @@
---
name: Feature Request
about: Suggest a new feature for the rippled project
title: "[Title with short description] (Version: [rippled version])"
labels: Feature Request
assignees: ''
---
<!-- Please search existing issues to avoid creating duplicates.-->
## Summary
<!-- Provide a summary of the feature request. -->
## Motivation
<!-- Why do we need this feature?-->
## Solution
<!-- What is the solution?-->
## Paths Not Taken
<!-- What other alternatives have been considered?-->

.github/actions/build/action.yml

@@ -1,34 +0,0 @@
name: build
inputs:
  generator:
    default: null
  configuration:
    required: true
  cmake-args:
    default: null
  cmake-target:
    default: all
# An implicit input is the environment variable `build_dir`.
runs:
  using: composite
  steps:
    - name: configure
      shell: bash
      run: |
        cd ${build_dir}
        cmake \
          ${{ inputs.generator && format('-G "{0}"', inputs.generator) || '' }} \
          -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
          -DCMAKE_BUILD_TYPE=${{ inputs.configuration }} \
          -Dtests=TRUE \
          -Dxrpld=TRUE \
          ${{ inputs.cmake-args }} \
          ..
    - name: build
      shell: bash
      run: |
        cmake \
          --build ${build_dir} \
          --config ${{ inputs.configuration }} \
          --parallel ${NUM_PROCESSORS:-$(nproc)} \
          --target ${{ inputs.cmake-target }}

.github/actions/dependencies/action.yml

@@ -1,57 +0,0 @@
name: dependencies
inputs:
  configuration:
    required: true
# An implicit input is the environment variable `build_dir`.
runs:
  using: composite
  steps:
    - name: unlock Conan
      shell: bash
      run: conan remove --locks
    - name: export custom recipes
      shell: bash
      run: |
        conan config set general.revisions_enabled=1
        conan export external/snappy snappy/1.1.10@
        conan export external/rocksdb rocksdb/9.7.3@
        conan export external/soci soci/4.0.3@
        conan export external/nudb nudb/2.0.8@
    - name: add Ripple Conan remote
      shell: bash
      run: |
        conan remote list
        conan remote remove ripple || true
        # Do not quote the URL. An empty string will be accepted (with
        # a non-fatal warning), but a missing argument will not.
        conan remote add ripple ${{ env.CONAN_URL }} --insert 0
    - name: try to authenticate to Ripple Conan remote
      id: remote
      shell: bash
      run: |
        # `conan user` implicitly uses the environment variables
        # CONAN_LOGIN_USERNAME_<REMOTE> and CONAN_PASSWORD_<REMOTE>.
        # https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
        # https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
        # https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
        echo outcome=$(conan user --remote ripple --password >&2 \
          && echo success || echo failure) | tee ${GITHUB_OUTPUT}
    - name: list missing binaries
      id: binaries
      shell: bash
      # Print the list of dependencies that would need to be built locally.
      # A non-empty list means we have "failed" to cache binaries remotely.
      run: |
        echo missing=$(conan info . --build missing --settings build_type=${{ inputs.configuration }} --json 2>/dev/null | grep '^\[') | tee ${GITHUB_OUTPUT}
    - name: install dependencies
      shell: bash
      run: |
        mkdir ${build_dir}
        cd ${build_dir}
        conan install \
          --output-folder . \
          --build missing \
          --options tests=True \
          --options xrpld=True \
          --settings build_type=${{ inputs.configuration }} \
          ..

.github/pull_request_template.md

@@ -1,85 +0,0 @@
<!--
This PR template helps you to write a good pull request description.
Please feel free to include additional useful information even beyond what is requested below.
If your branch is on a personal fork and has a name that allows it to
run CI build/test jobs (e.g. "ci/foo"), remember to rename it BEFORE
opening the PR. This avoids unnecessary redundant test runs. Renaming
the branch after opening the PR will close the PR.
https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-branches-in-your-repository/renaming-a-branch
-->
## High Level Overview of Change
<!--
Please include a summary of the changes.
This may be a direct input to the release notes.
If too broad, please consider splitting into multiple PRs.
If there is a relevant task or issue, please link it here.
-->
### Context of Change
<!--
Please include the context of a change.
If a bug fix, when was the bug introduced? What was the behavior?
If a new feature, why was this architecture chosen? What were the alternatives?
If a refactor, how is this better than the previous implementation?
If there is a spec or design document for this feature, please link it here.
-->
### Type of Change
<!--
Please check [x] relevant options, delete irrelevant ones.
-->
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] Refactor (non-breaking change that only restructures code)
- [ ] Performance (increase or change in throughput and/or latency)
- [ ] Tests (you added tests for code that already exists, or for the new feature included in this PR)
- [ ] Documentation update
- [ ] Chore (no impact to binary, e.g. `.gitignore`, formatting, dropping support for older tooling)
- [ ] Release
### API Impact
<!--
Please check [x] relevant options, delete irrelevant ones.
* If there is any impact to the public API methods (HTTP / WebSocket), please update https://github.com/xrplf/rippled/blob/develop/API-CHANGELOG.md
* Update API-CHANGELOG.md and add the change directly in this PR by pushing to your PR branch.
* libxrpl: See https://github.com/XRPLF/rippled/blob/develop/docs/build/depend.md
* Peer Protocol: See https://xrpl.org/peer-protocol.html
-->
- [ ] Public API: New feature (new methods and/or new fields)
- [ ] Public API: Breaking change (in general, breaking changes should only impact the next api_version)
- [ ] `libxrpl` change (any change that may affect `libxrpl` or dependents of `libxrpl`)
- [ ] Peer protocol change (must be backward compatible or bump the peer protocol version)
<!--
## Before / After
If relevant, use this section for an English description of the change at a technical level.
If this change affects an API, examples should be included here.
For performance-impacting changes, please provide these details:
1. Is this a new feature, bug fix, or improvement to existing functionality?
2. What behavior/functionality does the change impact?
3. In what processing can the impact be measured? Be as specific as possible - e.g. RPC client call, payment transaction that involves LOB, AMM, caching, DB operations, etc.
4. Does this change affect concurrent processing - e.g. does it involve acquiring locks, multi-threaded processing, or async processing?
-->
<!--
## Test Plan
If helpful, please describe the tests that you ran to verify your changes and provide instructions so that others can reproduce.
This section may not be needed if your change includes thoroughly commented unit tests.
-->
<!--
## Future Tasks
For future tasks related to PR.
-->

.github/workflows/clang-format.yml

@@ -1,63 +0,0 @@
name: clang-format
on:
  push:
  pull_request:
    types: [opened, reopened, synchronize, ready_for_review]
jobs:
  check:
    if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    runs-on: ubuntu-24.04
    env:
      CLANG_VERSION: 18
    steps:
      - uses: actions/checkout@v4
      - name: Install clang-format
        run: |
          codename=$( lsb_release --codename --short )
          sudo tee /etc/apt/sources.list.d/llvm.list >/dev/null <<EOF
          deb http://apt.llvm.org/${codename}/ llvm-toolchain-${codename}-${CLANG_VERSION} main
          deb-src http://apt.llvm.org/${codename}/ llvm-toolchain-${codename}-${CLANG_VERSION} main
          EOF
          wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
          sudo apt-get update
          sudo apt-get install clang-format-${CLANG_VERSION}
      - name: Format first-party sources
        run: find include src tests -type f \( -name '*.cpp' -o -name '*.hpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format-${CLANG_VERSION} -i {} +
      - name: Check for differences
        id: assert
        run: |
          set -o pipefail
          git diff --exit-code | tee "clang-format.patch"
      - name: Upload patch
        if: failure() && steps.assert.outcome == 'failure'
        uses: actions/upload-artifact@v4
        continue-on-error: true
        with:
          name: clang-format.patch
          if-no-files-found: ignore
          path: clang-format.patch
      - name: What happened?
        if: failure() && steps.assert.outcome == 'failure'
        env:
          PREAMBLE: |
            If you are reading this, you are looking at a failed GitHub Actions
            job. That means you pushed one or more files that did not conform
            to the formatting specified in .clang-format. That may be because
            you neglected to run 'git clang-format' or 'clang-format' before
            committing, or that your version of clang-format has an
            incompatibility with the one on this machine, which is:
          SUGGESTION: |
            To fix it, you can do one of two things:
            1. Download and apply the patch generated as an artifact of this
               job to your repo, commit, and push.
            2. Run 'git-clang-format --extensions cpp,h,hpp,ipp develop'
               in your repo, commit, and push.
        run: |
          echo "${PREAMBLE}"
          clang-format-${CLANG_VERSION} --version
          echo "${SUGGESTION}"
          exit 1

.github/workflows/doxygen.yml

@@ -1,37 +0,0 @@
name: Build and publish Doxygen documentation
# To test this workflow, push your changes to your fork's `develop` branch.
on:
  push:
    branches:
      - develop
      - doxygen
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  documentation:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: check environment
        run: |
          echo ${PATH} | tr ':' '\n'
          cmake --version
          doxygen --version
          env | sort
      - name: build
        run: |
          mkdir build
          cd build
          cmake -Donly_docs=TRUE ..
          cmake --build . --target docs --parallel $(nproc)
      - name: publish
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: build/docs/html

.github/workflows/levelization.yml

@@ -1,53 +0,0 @@
name: levelization
on:
  push:
  pull_request:
    types: [opened, reopened, synchronize, ready_for_review]
jobs:
  check:
    if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    runs-on: ubuntu-latest
    env:
      CLANG_VERSION: 10
    steps:
      - uses: actions/checkout@v4
      - name: Check levelization
        run: Builds/levelization/levelization.sh
      - name: Check for differences
        id: assert
        run: |
          set -o pipefail
          git diff --exit-code | tee "levelization.patch"
      - name: Upload patch
        if: failure() && steps.assert.outcome == 'failure'
        uses: actions/upload-artifact@v4
        continue-on-error: true
        with:
          name: levelization.patch
          if-no-files-found: ignore
          path: levelization.patch
      - name: What happened?
        if: failure() && steps.assert.outcome == 'failure'
        env:
          MESSAGE: |
            If you are reading this, you are looking at a failed GitHub
            Actions job. That means you changed the dependency relationships
            between the modules in rippled. That may be an improvement or a
            regression. This check doesn't judge.
            A rule of thumb, though, is that if your changes caused
            something to be removed from loops.txt, that's probably an
            improvement. If something was added, it's probably a regression.
            To fix it, you can do one of two things:
            1. Download and apply the patch generated as an artifact of this
               job to your repo, commit, and push.
            2. Run './Builds/levelization/levelization.sh' in your repo,
               commit, and push.
            See Builds/levelization/README.md for more info.
        run: |
          echo "${MESSAGE}"
          exit 1

.github/workflows/libxrpl.yml

@@ -1,91 +0,0 @@
name: Check libXRPL compatibility with Clio
env:
  CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
  CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
  CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
on:
  pull_request:
    paths:
      - 'src/libxrpl/protocol/BuildInfo.cpp'
      - '.github/workflows/libxrpl.yml'
    types: [opened, reopened, synchronize, ready_for_review]
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  publish:
    if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    name: Publish libXRPL
    outputs:
      outcome: ${{ steps.upload.outputs.outcome }}
      version: ${{ steps.version.outputs.version }}
      channel: ${{ steps.channel.outputs.channel }}
    runs-on: [self-hosted, heavy]
    container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
    steps:
      - name: Wait for essential checks to succeed
        uses: lewagon/wait-on-check-action@v1.3.4
        with:
          ref: ${{ github.event.pull_request.head.sha || github.sha }}
          running-workflow-name: wait-for-check-regexp
          check-regexp: '(dependencies|test).*linux.*' # Ignore windows and mac tests but make sure linux passes
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          wait-interval: 10
      - name: Checkout
        uses: actions/checkout@v4
      - name: Generate channel
        id: channel
        shell: bash
        run: |
          echo channel="clio/pr_${{ github.event.pull_request.number }}" | tee ${GITHUB_OUTPUT}
      - name: Export new package
        shell: bash
        run: |
          conan export . ${{ steps.channel.outputs.channel }}
      - name: Add Ripple Conan remote
        shell: bash
        run: |
          conan remote list
          conan remote remove ripple || true
          # Do not quote the URL. An empty string will be accepted (with a non-fatal warning), but a missing argument will not.
          conan remote add ripple ${{ env.CONAN_URL }} --insert 0
      - name: Parse new version
        id: version
        shell: bash
        run: |
          echo version="$(cat src/libxrpl/protocol/BuildInfo.cpp | grep "versionString =" \
            | awk -F '"' '{print $2}')" | tee ${GITHUB_OUTPUT}
      - name: Try to authenticate to Ripple Conan remote
        id: remote
        shell: bash
        run: |
          # `conan user` implicitly uses the environment variables CONAN_LOGIN_USERNAME_<REMOTE> and CONAN_PASSWORD_<REMOTE>.
          # https://docs.conan.io/1/reference/commands/misc/user.html#using-environment-variables
          # https://docs.conan.io/1/reference/env_vars.html#conan-login-username-conan-login-username-remote-name
          # https://docs.conan.io/1/reference/env_vars.html#conan-password-conan-password-remote-name
          echo outcome=$(conan user --remote ripple --password >&2 \
            && echo success || echo failure) | tee ${GITHUB_OUTPUT}
      - name: Upload new package
        id: upload
        if: (steps.remote.outputs.outcome == 'success')
        shell: bash
        run: |
          echo "conan upload version ${{ steps.version.outputs.version }} on channel ${{ steps.channel.outputs.channel }}"
          echo outcome=$(conan upload xrpl/${{ steps.version.outputs.version }}@${{ steps.channel.outputs.channel }} --remote ripple --confirm >&2 \
            && echo success || echo failure) | tee ${GITHUB_OUTPUT}
  notify_clio:
    name: Notify Clio
    runs-on: ubuntu-latest
    needs: publish
    env:
      GH_TOKEN: ${{ secrets.CLIO_NOTIFY_TOKEN }}
    steps:
      - name: Notify Clio about new version
        if: (needs.publish.outputs.outcome == 'success')
        shell: bash
        run: |
          gh api --method POST -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/xrplf/clio/dispatches -f "event_type=check_libxrpl" \
            -F "client_payload[version]=${{ needs.publish.outputs.version }}@${{ needs.publish.outputs.channel }}" \
            -F "client_payload[pr]=${{ github.event.pull_request.number }}"

.github/workflows/macos.yml

@@ -1,99 +0,0 @@
name: macos
on:
  pull_request:
    types: [opened, reopened, synchronize, ready_for_review]
  push:
    # If the branches list is ever changed, be sure to change it on all
    # build/test jobs (nix, macos, windows, instrumentation)
    branches:
      # Always build the package branches
      - develop
      - release
      - master
      # Branches that opt-in to running
      - 'ci/**'
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  test:
    if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    strategy:
      matrix:
        platform:
          - macos
        generator:
          - Ninja
        configuration:
          - Release
    runs-on: [self-hosted, macOS]
    env:
      # The `build` action requires these variables.
      build_dir: .build
      NUM_PROCESSORS: 12
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: install Conan
        run: |
          brew install conan@1
          echo '/opt/homebrew/opt/conan@1/bin' >> $GITHUB_PATH
      - name: install Ninja
        if: matrix.generator == 'Ninja'
        run: brew install ninja
      - name: install python
        run: |
          if which python > /dev/null 2>&1; then
            echo "Python executable exists"
          else
            brew install python@3.13
            ln -s /opt/homebrew/bin/python3 /opt/homebrew/bin/python
          fi
      - name: install cmake
        run: |
          if which cmake > /dev/null 2>&1; then
            echo "cmake executable exists"
          else
            brew install cmake
          fi
      - name: install nproc
        run: |
          brew install coreutils
      - name: check environment
        run: |
          env | sort
          echo ${PATH} | tr ':' '\n'
          python --version
          conan --version
          cmake --version
          nproc --version
          echo -n "nproc returns: "
          nproc
          system_profiler SPHardwareDataType
          sysctl -n hw.logicalcpu
          clang --version
      - name: configure Conan
        run: |
          conan profile new default --detect || true
          conan profile update settings.compiler.cppstd=20 default
      - name: build dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
          CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
          CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
        with:
          configuration: ${{ matrix.configuration }}
      - name: build
        uses: ./.github/actions/build
        with:
          generator: ${{ matrix.generator }}
          configuration: ${{ matrix.configuration }}
          cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
      - name: test
        run: |
          n=$(nproc)
          echo "Using $n test jobs"
          ${build_dir}/rippled --unittest --unittest-jobs $n

.github/workflows/missing-commits.yml

@@ -1,60 +0,0 @@
name: missing-commits
on:
  push:
    branches:
      # Only check that the branches are up to date when updating the
      # relevant branches.
      - develop
      - release
jobs:
  up_to_date:
    runs-on: ubuntu-24.04
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Check for missing commits
        id: commits
        env:
          SUGGESTION: |
            If you are reading this, then the commits indicated above are
            missing from "develop" and/or "release". Do a reverse-merge
            as soon as possible. See CONTRIBUTING.md for instructions.
        run: |
          set -o pipefail
          # Branches ordered by how "canonical" they are. Every commit in
          # one branch should be in all the branches behind it
          order=( master release develop )
          branches=()
          for branch in "${order[@]}"
          do
            # Check that the branches exist so that this job will work on
            # forked repos, which don't necessarily have master and
            # release branches.
            if git ls-remote --exit-code --heads origin \
              refs/heads/${branch} > /dev/null
            then
              branches+=( origin/${branch} )
            fi
          done
          prior=()
          for branch in "${branches[@]}"
          do
            if [[ ${#prior[@]} -ne 0 ]]
            then
              echo "Checking ${prior[@]} for commits missing from ${branch}"
              git log --oneline --no-merges "${prior[@]}" \
                ^$branch | tee -a "missing-commits.txt"
              echo
            fi
            prior+=( "${branch}" )
          done
          if [[ $( cat missing-commits.txt | wc -l ) -ne 0 ]]
          then
            echo "${SUGGESTION}"
            exit 1
          fi

.github/workflows/nix.yml

@@ -1,443 +0,0 @@
name: nix
on:
  pull_request:
    types: [opened, reopened, synchronize, ready_for_review]
  push:
    # If the branches list is ever changed, be sure to change it on all
    # build/test jobs (nix, macos, windows)
    branches:
      # Always build the package branches
      - develop
      - release
      - master
      # Branches that opt-in to running
      - "ci/**"
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
# This workflow has multiple job matrixes.
# They can be considered phases because most of the matrices ("test",
# "coverage", "conan", ) depend on the first ("dependencies").
#
# The first phase has a job in the matrix for each combination of
# variables that affects dependency ABI:
# platform, compiler, and configuration.
# It creates a GitHub artifact holding the Conan profile,
# and builds and caches binaries for all the dependencies.
# If an Artifactory remote is configured, they are cached there.
# If not, they are added to the GitHub artifact.
# GitHub's "cache" action has a size limit (10 GB) that is too small
# to hold the binaries if they are built locally.
# We must use the "{upload,download}-artifact" actions instead.
#
# The remaining phases have a job in the matrix for each test
# configuration. They install dependency binaries from the cache,
# whichever was used, and build and test rippled.
#
# "instrumentation" is independent, but is included here because it also
# builds on linux in the same "on:" conditions.
jobs:
  dependencies:
    if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    strategy:
      fail-fast: false
      matrix:
        platform:
          - linux
        compiler:
          - gcc
          - clang
        configuration:
          - Debug
          - Release
        include:
          - compiler: gcc
            profile:
              version: 11
              cc: /usr/bin/gcc
              cxx: /usr/bin/g++
          - compiler: clang
            profile:
              version: 14
              cc: /usr/bin/clang-14
              cxx: /usr/bin/clang++-14
    runs-on: [self-hosted, heavy]
    container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
    env:
      build_dir: .build
    steps:
      - name: upgrade conan
        run: |
          pip install --upgrade "conan<2"
      - name: checkout
        uses: actions/checkout@v4
      - name: check environment
        run: |
          echo ${PATH} | tr ':' '\n'
          lsb_release -a || true
          ${{ matrix.profile.cc }} --version
          conan --version
          cmake --version
          env | sort
      - name: configure Conan
        run: |
          conan profile new default --detect
          conan profile update settings.compiler.cppstd=20 default
          conan profile update settings.compiler=${{ matrix.compiler }} default
          conan profile update settings.compiler.version=${{ matrix.profile.version }} default
          conan profile update settings.compiler.libcxx=libstdc++11 default
          conan profile update env.CC=${{ matrix.profile.cc }} default
          conan profile update env.CXX=${{ matrix.profile.cxx }} default
          conan profile update conf.tools.build:compiler_executables='{"c": "${{ matrix.profile.cc }}", "cpp": "${{ matrix.profile.cxx }}"}' default
      - name: archive profile
        # Create this archive before dependencies are added to the local cache.
        run: tar -czf conan.tar -C ~/.conan .
      - name: build dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
          CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
          CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
        with:
          configuration: ${{ matrix.configuration }}
      - name: upload archive
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
          path: conan.tar
          if-no-files-found: error
  test:
    strategy:
      fail-fast: false
      matrix:
        platform:
          - linux
        compiler:
          - gcc
          - clang
        configuration:
          - Debug
          - Release
        cmake-args:
          -
          - "-Dunity=ON"
    needs: dependencies
    runs-on: [self-hosted, heavy]
    container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
    env:
      build_dir: .build
    steps:
      - name: upgrade conan
        run: |
          pip install --upgrade "conan<2"
      - name: download cache
        uses: actions/download-artifact@v4
        with:
          name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
      - name: extract cache
        run: |
          mkdir -p ~/.conan
          tar -xzf conan.tar -C ~/.conan
      - name: check environment
        run: |
          env | sort
          echo ${PATH} | tr ':' '\n'
          conan --version
          cmake --version
      - name: checkout
        uses: actions/checkout@v4
      - name: dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        with:
          configuration: ${{ matrix.configuration }}
      - name: build
        uses: ./.github/actions/build
        with:
          generator: Ninja
          configuration: ${{ matrix.configuration }}
          cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
      - name: test
        run: |
          ${build_dir}/rippled --unittest --unittest-jobs $(nproc)
  reference-fee-test:
    strategy:
      fail-fast: false
      matrix:
        platform:
          - linux
        compiler:
          - gcc
        configuration:
          - Debug
        cmake-args:
          - "-DUNIT_TEST_REFERENCE_FEE=200"
          - "-DUNIT_TEST_REFERENCE_FEE=1000"
    needs: dependencies
    runs-on: [self-hosted, heavy]
    container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
    env:
      build_dir: .build
    steps:
      - name: upgrade conan
        run: |
          pip install --upgrade "conan<2"
      - name: download cache
        uses: actions/download-artifact@v4
        with:
          name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
      - name: extract cache
        run: |
          mkdir -p ~/.conan
          tar -xzf conan.tar -C ~/.conan
      - name: check environment
        run: |
          env | sort
          echo ${PATH} | tr ':' '\n'
          conan --version
          cmake --version
      - name: checkout
        uses: actions/checkout@v4
      - name: dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        with:
          configuration: ${{ matrix.configuration }}
      - name: build
        uses: ./.github/actions/build
        with:
          generator: Ninja
          configuration: ${{ matrix.configuration }}
          cmake-args: "-Dassert=TRUE -Dwerr=TRUE ${{ matrix.cmake-args }}"
      - name: test
        run: |
          ${build_dir}/rippled --unittest --unittest-jobs $(nproc)
  coverage:
    strategy:
      fail-fast: false
      matrix:
        platform:
          - linux
        compiler:
          - gcc
        configuration:
          - Debug
    needs: dependencies
    runs-on: [self-hosted, heavy]
    container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
    env:
      build_dir: .build
    steps:
      - name: upgrade conan
        run: |
          pip install --upgrade "conan<2"
      - name: download cache
        uses: actions/download-artifact@v4
        with:
          name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }}
      - name: extract cache
        run: |
          mkdir -p ~/.conan
          tar -xzf conan.tar -C ~/.conan
      - name: install gcovr
        run: pip install "gcovr>=7,<9"
      - name: check environment
        run: |
          echo ${PATH} | tr ':' '\n'
          conan --version
          cmake --version
          gcovr --version
          env | sort
          ls ~/.conan
      - name: checkout
        uses: actions/checkout@v4
      - name: dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        with:
          configuration: ${{ matrix.configuration }}
      - name: build
        uses: ./.github/actions/build
        with:
          generator: Ninja
          configuration: ${{ matrix.configuration }}
          cmake-args: >-
            -Dassert=TRUE
            -Dwerr=TRUE
            -Dcoverage=ON
            -Dcoverage_format=xml
            -DCODE_COVERAGE_VERBOSE=ON
            -DCMAKE_CXX_FLAGS="-O0"
            -DCMAKE_C_FLAGS="-O0"
          cmake-target: coverage
      - name: move coverage report
        shell: bash
        run: |
          mv "${build_dir}/coverage.xml" ./
      - name: archive coverage report
        uses: actions/upload-artifact@v4
        with:
          name: coverage.xml
          path: coverage.xml
          retention-days: 30
      - name: upload coverage report
        uses: wandalen/wretry.action@v1.4.10
        with:
          action: codecov/codecov-action@v4.5.0
          with: |
            files: coverage.xml
            fail_ci_if_error: true
            disable_search: true
            verbose: true
            plugin: noop
            token: ${{ secrets.CODECOV_TOKEN }}
          attempt_limit: 5
          attempt_delay: 210000 # in milliseconds
  conan:
    needs: dependencies
    runs-on: [self-hosted, heavy]
    container: ghcr.io/xrplf/rippled-build-ubuntu:aaf5e3e
    env:
      build_dir: .build
      configuration: Release
    steps:
      - name: upgrade conan
        run: |
          pip install --upgrade "conan<2"
      - name: download cache
        uses: actions/download-artifact@v4
        with:
          name: linux-gcc-${{ env.configuration }}
      - name: extract cache
        run: |
          mkdir -p ~/.conan
          tar -xzf conan.tar -C ~/.conan
      - name: check environment
        run: |
          env | sort
          echo ${PATH} | tr ':' '\n'
          conan --version
          cmake --version
      - name: checkout
        uses: actions/checkout@v4
      - name: dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
        with:
          configuration: ${{ env.configuration }}
      - name: export
        run: |
          version=$(conan inspect --raw version .)
          reference="xrpl/${version}@local/test"
          conan remove -f ${reference} || true
          conan export . local/test
          echo "reference=${reference}" >> "${GITHUB_ENV}"
      - name: build
        run: |
          cd tests/conan
          mkdir ${build_dir}
          cd ${build_dir}
          conan install .. --output-folder . \
            --require-override ${reference} --build missing
          cmake .. \
            -DCMAKE_TOOLCHAIN_FILE:FILEPATH=./build/${configuration}/generators/conan_toolchain.cmake \
            -DCMAKE_BUILD_TYPE=${configuration}
          cmake --build .
          ./example | grep '^[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'
  # NOTE we are not using dependencies built above because it lags with
  # compiler versions. Instrumentation requires clang version 16 or
  # later
  instrumentation-build:
    if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    env:
      CLANG_RELEASE: 16
    strategy:
      fail-fast: false
    runs-on: [self-hosted, heavy]
    container: debian:bookworm
    steps:
      - name: install prerequisites
        env:
          DEBIAN_FRONTEND: noninteractive
        run: |
          apt-get update
          apt-get install --yes --no-install-recommends \
            clang-${CLANG_RELEASE} clang++-${CLANG_RELEASE} \
            python3-pip python-is-python3 make cmake git wget
          apt-get clean
          update-alternatives --install \
            /usr/bin/clang clang /usr/bin/clang-${CLANG_RELEASE} 100 \
            --slave /usr/bin/clang++ clang++ /usr/bin/clang++-${CLANG_RELEASE}
          update-alternatives --auto clang
          pip install --no-cache --break-system-packages "conan<2"
      - name: checkout
        uses: actions/checkout@v4
      - name: prepare environment
        run: |
          mkdir ${GITHUB_WORKSPACE}/.build
          echo "SOURCE_DIR=$GITHUB_WORKSPACE" >> $GITHUB_ENV
          echo "BUILD_DIR=$GITHUB_WORKSPACE/.build" >> $GITHUB_ENV
          echo "CC=/usr/bin/clang" >> $GITHUB_ENV
          echo "CXX=/usr/bin/clang++" >> $GITHUB_ENV
      - name: configure Conan
        run: |
          conan profile new --detect default
          conan profile update settings.compiler=clang default
          conan profile update settings.compiler.version=${CLANG_RELEASE} default
          conan profile update settings.compiler.libcxx=libstdc++11 default
          conan profile update settings.compiler.cppstd=20 default
          conan profile update options.rocksdb=False default
          conan profile update \
            'conf.tools.build:compiler_executables={"c": "/usr/bin/clang", "cpp": "/usr/bin/clang++"}' default
          conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default
          conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default
          conan export external/snappy snappy/1.1.10@
          conan export external/soci soci/4.0.3@
      - name: build dependencies
        run: |
          cd ${BUILD_DIR}
          conan install ${SOURCE_DIR} \
            --output-folder ${BUILD_DIR} \
            --install-folder ${BUILD_DIR} \
            --build missing \
            --settings build_type=Debug
      - name: build with instrumentation
        run: |
          cd ${BUILD_DIR}
          cmake -S ${SOURCE_DIR} -B ${BUILD_DIR} \
            -Dvoidstar=ON \
            -Dtests=ON \
            -Dxrpld=ON \
            -DCMAKE_BUILD_TYPE=Debug \
            -DSECP256K1_BUILD_BENCHMARK=OFF \
            -DSECP256K1_BUILD_TESTS=OFF \
            -DSECP256K1_BUILD_EXHAUSTIVE_TESTS=OFF \
            -DCMAKE_TOOLCHAIN_FILE=${BUILD_DIR}/build/generators/conan_toolchain.cmake
          cmake --build . --parallel $(nproc)
      - name: verify instrumentation enabled
        run: |
          cd ${BUILD_DIR}
          ./rippled --version | grep libvoidstar
      - name: run unit tests
        run: |
          cd ${BUILD_DIR}
          ./rippled -u --unittest-jobs $(( $(nproc)/4 ))

.github/workflows/windows.yml

@@ -1,99 +0,0 @@
name: windows
on:
  pull_request:
    types: [opened, reopened, synchronize, ready_for_review]
  push:
    # If the branches list is ever changed, be sure to change it on all
    # build/test jobs (nix, macos, windows, instrumentation)
    branches:
      # Always build the package branches
      - develop
      - release
      - master
      # Branches that opt-in to running
      - 'ci/**'
# https://docs.github.com/en/actions/using-jobs/using-concurrency
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  test:
    if: ${{ github.event_name == 'push' || github.event.pull_request.draft != true || contains(github.event.pull_request.labels.*.name, 'DraftRunCI') }}
    strategy:
      fail-fast: false
      matrix:
        version:
          - generator: Visual Studio 17 2022
            runs-on: windows-2022
        configuration:
          - type: Release
            tests: true
          - type: Debug
            # Skip running unit tests on debug builds, because they
            # take an unreasonable amount of time
            tests: false
            runtime: d
    runs-on: ${{ matrix.version.runs-on }}
    env:
      build_dir: .build
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: choose Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.9
      - name: learn Python cache directory
        id: pip-cache
        shell: bash
        run: |
          python -m pip install --upgrade pip
          echo "dir=$(pip cache dir)" | tee ${GITHUB_OUTPUT}
      - name: restore Python cache directory
        uses: actions/cache@v4
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }}
      - name: install Conan
        run: pip install wheel 'conan<2'
      - name: check environment
        run: |
          dir env:
          $env:PATH -split ';'
          python --version
          conan --version
          cmake --version
      - name: configure Conan
        shell: bash
        run: |
          conan profile new default --detect
          conan profile update settings.compiler.cppstd=20 default
          conan profile update \
            settings.compiler.runtime=MT${{ matrix.configuration.runtime }} \
            default
      - name: build dependencies
        uses: ./.github/actions/dependencies
        env:
          CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod
          CONAN_LOGIN_USERNAME_RIPPLE: ${{ secrets.CONAN_USERNAME }}
          CONAN_PASSWORD_RIPPLE: ${{ secrets.CONAN_TOKEN }}
        with:
          configuration: ${{ matrix.configuration.type }}
      - name: build
        uses: ./.github/actions/build
        with:
          generator: '${{ matrix.version.generator }}'
          configuration: ${{ matrix.configuration.type }}
          # Hard code for now. Move to the matrix if varied options are needed
          cmake-args: '-Dassert=TRUE -Dwerr=TRUE -Dreporting=OFF -Dunity=ON'
          cmake-target: install
      - name: test
        shell: bash
        if: ${{ matrix.configuration.tests }}
        run: |
          ${build_dir}/${{ matrix.configuration.type }}/rippled --unittest \
            --unittest-jobs $(nproc)

.gitignore vendored (28 changes)

@@ -21,12 +21,9 @@ bin/project-cache.jam
# Ignore object files.
*.o
.nih_c
build
tags
TAGS
GTAGS
GRTAGS
GPATH
bin/rippled
Debug/*.*
Release/*.*
@@ -36,11 +33,8 @@ Release/*.*
*.gcda
*.gcov
# Levelization checking
Builds/levelization/results/rawincludes.txt
Builds/levelization/results/paths.txt
Builds/levelization/results/includes/
Builds/levelization/results/includedby/
# Ignore locally installed node_modules
/node_modules
# Ignore tmp directory.
tmp
@@ -56,15 +50,15 @@ debug_log.txt
# Ignore customized configs
rippled.cfg
validators.txt
test/config.js
# Doxygen generated documentation output
HtmlDocumentation
docs/html_doc
# Xcode user-specific project settings
# Xcode
.DS_Store
/build/
*/build/*
*.pbxuser
!default.pbxuser
*.mode1v3
@@ -100,15 +94,3 @@ Builds/VisualStudio2015/*.sdf
# MSVC
*.pdb
.vs/
CMakeSettings.json
compile_commands.json
.clangd
packages
pkg_out
pkg
CMakeUserPresets.json
bld.rippled/
.vscode
# Suggested in-tree build directory
/.build/

.gitmodules vendored Normal file (12 changes)

@@ -0,0 +1,12 @@
[submodule "docs/docca"]
    path = docs/docca
    url = https://github.com/vinniefalco/docca.git
[submodule "src/nudb/extras/beast"]
    path = src/nudb/extras/beast
    url = https://github.com/vinniefalco/Beast.git
[submodule "src/nudb/extras/rocksdb"]
    path = src/nudb/extras/rocksdb
    url = https://github.com/facebook/rocksdb.git
[submodule "src/nudb/doc/docca"]
    path = src/nudb/doc/docca
    url = https://github.com/vinniefalco/docca.git

.pre-commit-config.yaml

@@ -1,6 +0,0 @@
# .pre-commit-config.yaml
repos:
  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: v18.1.3
    hooks:
      - id: clang-format

.travis.yml Normal file (71 changes)

@@ -0,0 +1,71 @@
sudo: false
language: cpp
env:
  global:
    - LLVM_VERSION=3.8.0
    # Maintenance note: to move to a new version
    # of boost, update both BOOST_ROOT and BOOST_URL.
    # Note that for simplicity, BOOST_ROOT's final
    # namepart must match the folder name internal
    # to boost's .tar.gz.
    - LCOV_ROOT=$HOME/lcov
    - BOOST_ROOT=$HOME/boost_1_60_0
    - BOOST_URL='http://sourceforge.net/projects/boost/files/boost/1.60.0/boost_1_60_0.tar.gz'
addons:
  apt:
    sources: ['ubuntu-toolchain-r-test']
    packages:
      - gcc-5
      - g++-5
      - python-software-properties
      - protobuf-compiler
      - libprotobuf-dev
      - libssl-dev
      - libstdc++6
      - binutils-gold
      # Provides a backtrace if the unittests crash
      - gdb
matrix:
  include:
    # Default BUILD is "scons".
    - compiler: gcc
      env: GCC_VER=5 BUILD=cmake TARGET=debug.nounity PATH=$PWD/cmake/bin:$PATH
    - compiler: gcc
      env: GCC_VER=5 TARGET=coverage
    - compiler: clang
      env: GCC_VER=5 TARGET=debug CLANG_VER=3.8 PATH=$PWD/llvm-$LLVM_VERSION/bin:$PATH
    - compiler: clang
      env: GCC_VER=5 TARGET=debug.nounity CLANG_VER=3.8 PATH=$PWD/llvm-$LLVM_VERSION/bin:$PATH
    # The clang cmake builds do not link.
    # - compiler: clang
    #   env: GCC_VER=5 BUILD=cmake TARGET=debug CLANG_VER=3.8 PATH=$PWD/llvm-$LLVM_VERSION/bin:$PWD/cmake/bin:$PATH
    # - compiler: clang
    #   env: GCC_VER=5 BUILD=cmake TARGET=debug.nounity CLANG_VER=3.8 PATH=$PWD/llvm-$LLVM_VERSION/bin:$PWD/cmake/bin:$PATH
cache:
  directories:
    - $BOOST_ROOT
    - llvm-$LLVM_VERSION
    - cmake
before_install:
  - bin/ci/ubuntu/install-dependencies.sh
script:
  - travis_retry bin/ci/ubuntu/build-and-test.sh
notifications:
  email: false
  irc:
    channels:
      - "chat.freenode.net#ripple-dev"

API-CHANGELOG.md

@@ -1,225 +0,0 @@
# API Changelog
This changelog is intended to list all updates to the [public API methods](https://xrpl.org/public-api-methods.html).
For info about how [API versioning](https://xrpl.org/request-formatting.html#api-versioning) works, including examples, please view the [XLS-22d spec](https://github.com/XRPLF/XRPL-Standards/discussions/54). For details about the implementation of API versioning, view the [implementation PR](https://github.com/XRPLF/rippled/pull/3155). API versioning ensures existing integrations and users continue to receive existing behavior, while those that request a higher API version will experience new behavior.
The API version controls the API behavior you see. This includes what properties you see in responses, what parameters you're permitted to send in requests, and so on. You specify the API version in each of your requests. When a breaking change is introduced to the `rippled` API, a new version is released. To avoid breaking your code, you should set (or increase) your version when you're ready to upgrade.
For a log of breaking changes, see the **API Version [number]** headings. In general, breaking changes are associated with a particular API Version number. For non-breaking changes, scroll to the **XRP Ledger version [x.y.z]** headings. Non-breaking changes are associated with a particular XRP Ledger (`rippled`) release.
## API Version 2
API version 2 is available in `rippled` version 2.0.0 and later. To use this API, clients specify `"api_version" : 2` in each request.
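For example, a minimal WebSocket-style request that opts in to version 2 (the `server_info` command here is just an arbitrary illustration; any method takes the same field):

```json
{
  "id": 1,
  "command": "server_info",
  "api_version": 2
}
```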
#### Removed methods
In API version 2, the following deprecated methods are no longer available: (https://github.com/XRPLF/rippled/pull/4759)
- `tx_history` - Instead, use other methods such as `account_tx` or `ledger` with the `transactions` field set to `true`.
- `ledger_header` - Instead, use the `ledger` method.
#### Modifications to JSON transaction element in V2
In API version 2, JSON elements for transaction output have been changed and made consistent for all methods which output transactions. (https://github.com/XRPLF/rippled/pull/4775)
This helps to unify the JSON serialization format of transactions. (https://github.com/XRPLF/clio/issues/722, https://github.com/XRPLF/rippled/issues/4727)
- JSON transaction element is named `tx_json`
- Binary transaction element is named `tx_blob`
- JSON transaction metadata element is named `meta`
- Binary transaction metadata element is named `meta_blob`
Additionally, these elements are now consistently available next to `tx_json` (i.e. sibling elements), where possible:
- `hash` - Transaction ID. This data was stored inside transaction output in API version 1, but in API version 2 is a sibling element.
- `ledger_index` - Ledger index (only set on validated ledgers)
- `ledger_hash` - Ledger hash (only set on closed or validated ledgers)
- `close_time_iso` - Ledger close time expressed in ISO 8601 time format (only set on validated ledgers)
- `validated` - Bool element set to `true` if the transaction is in a validated ledger, otherwise `false`
This change affects the following methods:
- `tx` - Transaction data moved into element `tx_json` (was inline inside `result`) or, if binary output was requested, moved from `tx` to `tx_blob`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
- `account_tx` - Renamed transaction element from `tx` to `tx_json`. Renamed binary transaction metadata element (if it was requested) from `meta` to `meta_blob`. Changed location of `hash` and added new elements
- `transaction_entry` - Renamed transaction metadata element from `metadata` to `meta`. Changed location of `hash` and added new elements
- `subscribe` - Renamed transaction element from `transaction` to `tx_json`. Changed location of `hash` and added new elements
- `sign`, `sign_for`, `submit` and `submit_multisigned` - Changed location of `hash` element.
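Putting these elements together, a rough sketch of a v2 `tx` response for a validated transaction (all values below are placeholders) has this shape:

```json
{
  "result": {
    "tx_json": { "TransactionType": "Payment", "Account": "r..." },
    "meta": { "TransactionResult": "tesSUCCESS" },
    "hash": "ABCD1234...",
    "ledger_index": 123456,
    "ledger_hash": "EF567890...",
    "close_time_iso": "2024-01-01T00:00:00Z",
    "validated": true
  }
}
```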
#### Modification to `Payment` transaction JSON schema
When reading Payments, the `Amount` field should generally **not** be used. Instead, use [delivered_amount](https://xrpl.org/partial-payments.html#the-delivered_amount-field) to see the amount that the Payment delivered. To clarify its meaning, the `Amount` field is being renamed to `DeliverMax`. (https://github.com/XRPLF/rippled/pull/4733)
- In `Payment` transaction type, JSON RPC field `Amount` is renamed to `DeliverMax`. To enable smooth client transition, `Amount` is still handled, as described below: (https://github.com/XRPLF/rippled/pull/4733)
- On JSON RPC input (e.g. `submit_multisigned` etc. methods), `Amount` is recognized as an alias to `DeliverMax` for both API version 1 and version 2 clients.
- On JSON RPC input, submitting both `Amount` and `DeliverMax` fields is allowed _only_ if they are identical; otherwise such input is rejected with `rpcINVALID_PARAMS` error.
- On JSON RPC output (e.g. `subscribe`, `account_tx` etc. methods), `DeliverMax` is present in both API version 1 and version 2.
- On JSON RPC output, `Amount` is only present in API version 1 and _not_ in version 2.
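For example (the addresses and drops amount below are placeholders), a v2 Payment is written with `DeliverMax` where v1 used `Amount`:

```json
{
  "TransactionType": "Payment",
  "Account": "rSENDERxxxxxxxxxxxxxxxxxxxxxxxxxxx",
  "Destination": "rDESTxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
  "DeliverMax": "1000000"
}
```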
#### Modifications to account_info response
- `signer_lists` is returned in the root of the response. (In API version 1, it was nested under `account_data`.) (https://github.com/XRPLF/rippled/pull/3770)
- When using an invalid `signer_lists` value, the API now returns an "invalidParams" error. (https://github.com/XRPLF/rippled/pull/4585)
- (`signer_lists` must be a boolean. In API version 1, strings were accepted and may return a normal response - i.e. as if `signer_lists` were `true`.)
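Schematically (contents elided to empty values), the v2 `account_info` response places `signer_lists` beside `account_data` instead of inside it:

```json
{
  "result": {
    "account_data": {},
    "signer_lists": []
  }
}
```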
#### Modifications to [account_tx](https://xrpl.org/account_tx.html#account_tx) response
- Using `ledger_index_min`, `ledger_index_max`, and `ledger_index` together returns `invalidParams`, because if you use `ledger_index_min` or `ledger_index_max`, it does not make sense to also specify `ledger_index`. In API version 1, no error was returned. (https://github.com/XRPLF/rippled/pull/4571)
- The same applies for `ledger_index_min`, `ledger_index_max`, and `ledger_hash`. (https://github.com/XRPLF/rippled/issues/4545#issuecomment-1565065579)
- Using a `ledger_index_min` or `ledger_index_max` beyond the range of ledgers that the server has:
- returns `lgrIdxMalformed` in API version 2. Previously, in API version 1, no error was returned. (https://github.com/XRPLF/rippled/issues/4288)
- Attempting to use a non-boolean value (such as a string) for the `binary` or `forward` parameters returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. (https://github.com/XRPLF/rippled/pull/4620)
#### Modifications to [noripple_check](https://xrpl.org/noripple_check.html#noripple_check) response
- Attempting to use a non-boolean value (such as a string) for the `transactions` parameter returns `invalidParams` (`rpcINVALID_PARAMS`). Previously, in API version 1, no error was returned. (https://github.com/XRPLF/rippled/pull/4620)
## API Version 1
This version is supported by all `rippled` versions. For WebSocket and HTTP JSON-RPC requests, it is currently the default API version used when no `api_version` is specified.
The [commandline](https://xrpl.org/docs/references/http-websocket-apis/api-conventions/request-formatting/#commandline-format) always uses the latest API version. It is intended for ad-hoc usage by humans, not for programs, automated scripts, or production code.
### Inconsistency: server_info - network_id
The `network_id` field was added in the `server_info` response in version 1.5.0 (2019), but it is not returned in [reporting mode](https://xrpl.org/rippled-server-modes.html#reporting-mode). However, use of reporting mode is now discouraged, in favor of using [Clio](https://github.com/XRPLF/clio) instead.
## XRP Ledger server version 2.5.0
As of 2025-04-04, version 2.5.0 is in development. You can use a pre-release version by building from source or [using the `nightly` package](https://xrpl.org/docs/infrastructure/installation/install-rippled-on-ubuntu).
### Additions and bugfixes in 2.5.0
- `channel_authorize`: If `signing_support` is not enabled in the config, the RPC is disabled.
## XRP Ledger server version 2.4.0
[Version 2.4.0](https://github.com/XRPLF/rippled/releases/tag/2.4.0) was released on March 4, 2025.
### Additions and bugfixes in 2.4.0
- `ledger_entry`: `state` is added as an alias for `ripple_state`.
- `ledger_entry`: Enables case-insensitive filtering by canonical name in addition to case-sensitive filtering by RPC name.
- `validators`: Added new field `validator_list_threshold` in response.
- `simulate`: A new RPC that executes a [dry run of a transaction submission](https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-0069d-simulate#2-rpc-simulate); a request sketch follows this list
- Signing methods autofill fees better and properly handle transactions that don't have a base fee, and will also autofill the `NetworkID` field.
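A request sketch for `simulate` in the JSON-RPC format, per the linked XLS-0069d spec (the transaction and account below are placeholders; a signed `tx_blob` may be supplied instead of `tx_json`):

```json
{
  "method": "simulate",
  "params": [
    {
      "tx_json": {
        "TransactionType": "AccountSet",
        "Account": "rACCOUNTxxxxxxxxxxxxxxxxxxxxxxxxxx"
      }
    }
  ]
}
```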
## XRP Ledger server version 2.3.0
[Version 2.3.0](https://github.com/XRPLF/rippled/releases/tag/2.3.0) was released on Nov 25, 2024.
### Breaking changes in 2.3.0
- `book_changes`: If the requested ledger version is not available on this node, a `ledgerNotFound` error is returned and the node does not attempt to acquire the ledger from the p2p network (as with other non-admin RPCs).
Admins can still attempt to retrieve old ledgers with the `ledger_request` RPC.
### Additions and bugfixes in 2.3.0
- `book_changes`: Returns a `validated` field in its response, which was missing in prior versions.
## XRP Ledger server version 2.2.0
[Version 2.2.0](https://github.com/XRPLF/rippled/releases/tag/2.2.0) was released on Jun 5, 2024. The following additions are non-breaking (because they are purely additive):
- The `feature` method now has a non-admin mode for users. (It was previously only available to admin connections.) The method returns an updated list of amendments, including their names and other information. ([#4781](https://github.com/XRPLF/rippled/pull/4781))
## XRP Ledger server version 2.0.0
[Version 2.0.0](https://github.com/XRPLF/rippled/releases/tag/2.0.0) was released on Jan 9, 2024. The following additions are non-breaking (because they are purely additive):
- `server_definitions`: A new RPC that generates a `definitions.json`-like output that can be used in XRPL libraries.
- In `Payment` transactions, `DeliverMax` has been added. This is a replacement for the `Amount` field, which should not be used. Typically, the `delivered_amount` (in transaction metadata) should be used. To ease the transition, `DeliverMax` is present regardless of API version, since adding a field is non-breaking.
- API version 2 has been moved from beta to supported, meaning that it is generally available (regardless of the `beta_rpc_api` setting).
## XRP Ledger server version 1.12.0
[Version 1.12.0](https://github.com/XRPLF/rippled/releases/tag/1.12.0) was released on Sep 6, 2023. The following additions are non-breaking (because they are purely additive).
- `server_info`: Added `ports`, an array which advertises the RPC and WebSocket ports. This information is also included in the `/crawl` endpoint (which calls `server_info` internally). `grpc` and `peer` ports are also included. (https://github.com/XRPLF/rippled/pull/4427)
- `ports` contains objects, each containing a `port` for the listening port (a number string), and a `protocol` array listing the supported protocols on that port.
- This allows crawlers to build a more detailed topology without needing to port-scan nodes.
- (For peers and other non-admin clients, the info about admin ports is excluded.)
- Clawback: The following additions are gated by the Clawback amendment (`featureClawback`). (https://github.com/XRPLF/rippled/pull/4553)
- Adds an [AccountRoot flag](https://xrpl.org/accountroot.html#accountroot-flags) called `lsfAllowTrustLineClawback` (https://github.com/XRPLF/rippled/pull/4617)
- Adds the corresponding `asfAllowTrustLineClawback` [AccountSet Flag](https://xrpl.org/accountset.html#accountset-flags) as well.
- Clawback is disabled by default, so if an issuer desires the ability to claw back funds, they must use an `AccountSet` transaction to set the AllowTrustLineClawback flag. They must do this before creating any trust lines, offers, escrows, payment channels, or checks.
- Adds the [Clawback transaction type](https://github.com/XRPLF/XRPL-Standards/blob/master/XLS-39d-clawback/README.md#331-clawback-transaction), containing these fields:
- `Account`: The issuer of the asset being clawed back. Must also be the sender of the transaction.
- `Amount`: The amount being clawed back, with the `Amount.issuer` being the token holder's address.
- Adds [AMM](https://github.com/XRPLF/XRPL-Standards/discussions/78) ([#4294](https://github.com/XRPLF/rippled/pull/4294), [#4626](https://github.com/XRPLF/rippled/pull/4626)) feature:
- Adds `amm_info` API to retrieve AMM information for a given tokens pair.
- Adds `AMMCreate` transaction type to create `AMM` instance.
- Adds `AMMDeposit` transaction type to deposit funds into `AMM` instance.
- Adds `AMMWithdraw` transaction type to withdraw funds from `AMM` instance.
- Adds `AMMVote` transaction type to vote for the trading fee of `AMM` instance.
- Adds `AMMBid` transaction type to bid for the Auction Slot of `AMM` instance.
- Adds `AMMDelete` transaction type to delete `AMM` instance.
- Adds `sfAMMID` to `AccountRoot` to indicate that the account is `AMM`'s account. `AMMID` is used to fetch `ltAMM`.
- Adds `lsfAMMNode` `TrustLine` flag to indicate that one side of the `TrustLine` is `AMM` account.
- Adds `tfLPToken`, `tfSingleAsset`, `tfTwoAsset`, `tfOneAssetLPToken`, `tfLimitLPToken`, `tfTwoAssetIfEmpty`, `tfWithdrawAll`, and `tfOneAssetWithdrawAll`, which allow a trader to specify different field combinations for `AMMDeposit` and `AMMWithdraw` transactions.
- Adds new transaction result codes:
- `tecUNFUNDED_AMM`: insufficient balance to fund the AMM; the account does not have the funds required for liquidity provision.
- `tecAMM_BALANCE`: the AMM has an invalid balance; the calculated balances exceed the current pool balances.
- `tecAMM_FAILED`: the AMM transaction failed due to a processing failure.
- `tecAMM_INVALID_TOKENS`: invalid LP tokens; the input values, format, or calculated values are invalid.
- `tecAMM_EMPTY`: the AMM is in an empty state; the transaction requires the AMM to be in a non-empty state (LP tokens > 0).
- `tecAMM_NOT_EMPTY`: the AMM is not in an empty state; the transaction requires the AMM to be in an empty state (LP tokens == 0).
- `tecAMM_ACCOUNT`: the transaction is invalid because it targets an AMM account (for example, attempting clawback from an AMM account).
- `tecINCOMPLETE`: some work was completed, but more submissions are required to finish; `AMMDelete` deletes the trust lines in stages.
## XRP Ledger server version 1.11.0
[Version 1.11.0](https://github.com/XRPLF/rippled/releases/tag/1.11.0) was released on Jun 20, 2023.
### Breaking changes in 1.11
- Added the ability to mark amendments as obsolete. For the `feature` admin API, there is a new possible value for the `vetoed` field. (https://github.com/XRPLF/rippled/pull/4291)
- The value of `vetoed` can now be `true`, `false`, or `"Obsolete"`.
- Removed the acceptance of seeds or public keys in place of account addresses. (https://github.com/XRPLF/rippled/pull/4404)
- This simplifies the API and encourages better security practices (i.e. seeds should never be sent over the network).
- For the `ledger_data` method, when all entries are filtered out, the `state` field of the response is now an empty list (in other words, an empty array, `[]`). (Previously, it would return `null`.) While this is technically a breaking change, the new behavior is consistent with the documentation, so this is considered only a bug fix. (https://github.com/XRPLF/rippled/pull/4398)
- If and when the `fixNFTokenRemint` amendment activates, there will be a new AccountRoot field, `FirstNFTSequence`. This field is set to the current account sequence when the account issues their first NFT. If an account has not issued any NFTs, then the field is not set. ([#4406](https://github.com/XRPLF/rippled/pull/4406))
- There is a new account deletion restriction: an account can only be deleted if `FirstNFTSequence` + `MintedNFTokens` + `256` is less than the current ledger sequence.
- This is potentially a breaking change if clients have logic for determining whether an account can be deleted.
- NetworkID
- For sidechains and networks with a network ID greater than 1024, there is a new [transaction common field](https://xrpl.org/transaction-common-fields.html), `NetworkID`. (https://github.com/XRPLF/rippled/pull/4370)
- This field helps to prevent replay attacks and is now required for chains whose network ID is 1025 or higher.
- The field must be omitted for Mainnet, so there is no change for Mainnet users.
- There are three new local error codes:
- `telNETWORK_ID_MAKES_TX_NON_CANONICAL`: a `NetworkID` is present but the chain's network ID is less than 1025. Remove the field from the transaction, and try again.
- `telREQUIRES_NETWORK_ID`: a `NetworkID` is required, but is not present. Add the field to the transaction, and try again.
- `telWRONG_NETWORK`: a `NetworkID` is specified, but it is for a different network. Submit the transaction to a different server which is connected to the correct network.
### Additions and bug fixes in 1.11
- Added `nftoken_id`, `nftoken_ids`, and `offer_id` meta fields to NFT `tx` and `account_tx` responses. (https://github.com/XRPLF/rippled/pull/4447)
- Added an `account_flags` object to the `account_info` method response. (https://github.com/XRPLF/rippled/pull/4459)
- Added `NFTokenPages` to the `account_objects` RPC. (https://github.com/XRPLF/rippled/pull/4352)
- Fixed: `marker` returned from the `account_lines` command would not work on subsequent commands. (https://github.com/XRPLF/rippled/pull/4361)
## XRP Ledger server version 1.10.0
[Version 1.10.0](https://github.com/XRPLF/rippled/releases/tag/1.10.0)
was released on Mar 14, 2023.
### Breaking changes in 1.10
- If the `XRPFees` feature is enabled, the `fee_ref` field will be
removed from the [ledger subscription stream](https://xrpl.org/subscribe.html#ledger-stream), because it will no longer
have any meaning.
# Unit tests for API changes
The following information is useful to developers contributing to this project:
The purpose of unit tests is to catch bugs and prevent regressions. In general, it often makes sense to create a test function when there is a breaking change to the API. For APIs that have changed in a new API version, the tests should be modified so that both the prior version and the new version are properly tested.
To take one example: for `account_info` version 1, WebSocket and JSON-RPC behavior should be tested. The latest API version, i.e. API version 2, should be tested over WebSocket, JSON-RPC, and command line.
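For example, once the server is built, a focused subset of unit tests can be run from the command line by passing a prefix to `--unittest` (the suite name below is hypothetical; substitute the suite you are working on):
```
./rippled --unittest=ripple.rpc.AccountInfo
```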

BUILD.md
@@ -1,500 +0,0 @@
| :warning: **WARNING** :warning:
|---|
| These instructions assume you have a C++ development environment ready with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up on Linux, macOS, or Windows, [see this guide](./docs/build/environment.md). |
> These instructions also assume a basic familiarity with Conan and CMake.
> If you are unfamiliar with Conan,
> you can read our [crash course](./docs/build/conan.md)
> or the official [Getting Started][3] walkthrough.
## Branches
For a stable release, choose the `master` branch or one of the [tagged
releases](https://github.com/ripple/rippled/releases).
```
git checkout master
```
For the latest release candidate, choose the `release` branch.
```
git checkout release
```
For the latest set of untested features, or to contribute, choose the `develop`
branch.
```
git checkout develop
```
## Minimum Requirements
See [System Requirements](https://xrpl.org/system-requirements.html).
Building rippled generally requires git, Python, Conan, CMake, and a C++ compiler. Some guidance on setting up such a [C++ development environment can be found here](./docs/build/environment.md).
- [Python 3.7](https://www.python.org/downloads/)
- [Conan 1.60](https://conan.io/downloads.html)[^1]
- [CMake 3.16](https://cmake.org/download/)
[^1]: It is possible to build with Conan 2.x,
but the instructions are significantly different,
which is why we are not recommending it yet.
Notably, the `conan profile update` command is removed in 2.x.
Profiles must be edited by hand.
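As a rough sketch of the Conan 2.x equivalent (assuming a Linux shell with GNU `sed`; the profile path is the Conan 2.x default):
```
conan profile detect    # writes ~/.conan2/profiles/default if it does not already exist
sed -i 's/^compiler.cppstd=.*/compiler.cppstd=20/' ~/.conan2/profiles/default
```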
`rippled` is written in the C++20 dialect and includes the `<concepts>` header.
The [minimum compiler versions][2] required are:
| Compiler | Version |
|-------------|---------|
| GCC | 11 |
| Clang | 13 |
| Apple Clang | 13.1.6 |
| MSVC | 19.23 |
### Linux
The Ubuntu operating system has received the highest level of
quality assurance, testing, and support.
Here are [sample instructions for setting up a C++ development environment on Linux](./docs/build/environment.md#linux).
### Mac
Many rippled engineers use macOS for development.
Here are [sample instructions for setting up a C++ development environment on macOS](./docs/build/environment.md#macos).
### Windows
Windows is not recommended for production use at this time.
- Additionally, 32-bit Windows development is not supported.
[Boost]: https://www.boost.org/
## Steps
### Set Up Conan
After you have a [C++ development environment](./docs/build/environment.md) ready with Git, Python, Conan, CMake, and a C++ compiler, you may need to set up your Conan profile.
These instructions assume a basic familiarity with Conan and CMake.
If you are unfamiliar with Conan, then please read [this crash course](./docs/build/conan.md) or the official [Getting Started][3] walkthrough.
You'll need at least one Conan profile:
```
conan profile new default --detect
```
Update the compiler settings:
```
conan profile update settings.compiler.cppstd=20 default
```
Configure Conan (1.x only) to use recipe revisions:
```
conan config set general.revisions_enabled=1
```
**Linux** developers will commonly have a default Conan [profile][] that compiles
with GCC and links with libstdc++.
If you are linking with libstdc++ (see profile setting `compiler.libcxx`),
then you will need to choose the `libstdc++11` ABI:
```
conan profile update settings.compiler.libcxx=libstdc++11 default
```
Ensure interoperability between `boost::string_view` and `std::string_view` types:
```
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_BEAST_USE_STD_STRING_VIEW"]' default
conan profile update 'env.CXXFLAGS="-DBOOST_BEAST_USE_STD_STRING_VIEW"' default
```
If you have other flags in the `conf.tools.build` or `env.CXXFLAGS` sections, make sure to retain the existing flags and append the new ones. You can check them with:
```
conan profile show default
```
**Windows** developers may need to use the x64 native build tools.
An easy way to do that is to run the shortcut "x64 Native Tools Command
Prompt" for the version of Visual Studio that you have installed.
Windows developers must also build `rippled` and its dependencies for the x64
architecture:
```
conan profile update settings.arch=x86_64 default
```
### Multiple compilers
When `/usr/bin/g++` exists on a platform, it is the default C++ compiler. This
default works for some users.
However, if this compiler cannot build rippled or its dependencies, then you can
install another compiler and set Conan and CMake to use it.
Update the `conf.tools.build:compiler_executables` setting in order to set the correct variables (`CMAKE_<LANG>_COMPILER`) in the
generated CMake toolchain file.
For example, on Ubuntu 20, you may have gcc at `/usr/bin/gcc` and g++ at `/usr/bin/g++`; if that is the case, you can select those compilers with:
```
conan profile update 'conf.tools.build:compiler_executables={"c": "/usr/bin/gcc", "cpp": "/usr/bin/g++"}' default
```
Replace `/usr/bin/gcc` and `/usr/bin/g++` with paths to the desired compilers.
It should choose the compiler for dependencies as well,
but not all of them have a Conan recipe that respects this setting (yet).
For the rest, you can set these environment variables.
Replace `<path>` with paths to the desired compilers:
- `conan profile update env.CC=<path> default`
- `conan profile update env.CXX=<path> default`
Export our [Conan recipe for Snappy](./external/snappy).
It does not explicitly link the C++ standard library,
which allows you to statically link it with GCC, if you want.
```
# Conan 1.x
conan export external/snappy snappy/1.1.10@
# Conan 2.x
conan export --version 1.1.10 external/snappy
```
Export our [Conan recipe for RocksDB](./external/rocksdb).
It does not override paths to dependencies when building with Visual Studio.
```
# Conan 1.x
conan export external/rocksdb rocksdb/9.7.3@
# Conan 2.x
conan export --version 9.7.3 external/rocksdb
```
Export our [Conan recipe for SOCI](./external/soci).
It patches their CMake to correctly import its dependencies.
```
# Conan 1.x
conan export external/soci soci/4.0.3@
# Conan 2.x
conan export --version 4.0.3 external/soci
```
Export our [Conan recipe for NuDB](./external/nudb).
It fixes some source files to add missing `#include`s.
```
# Conan 1.x
conan export external/nudb nudb/2.0.8@
# Conan 2.x
conan export --version 2.0.8 external/nudb
```
### Build and Test
1. Create a build directory and move into it.
```
mkdir .build
cd .build
```
You can use any directory name. Conan treats your working directory as an
install folder and generates files with implementation details.
You don't need to worry about these files, but make sure to change
your working directory to your build directory before calling Conan.
**Note:** You can specify a directory for the installation files by adding
the `install-folder` or `-if` option to every `conan install` command
in the next step.
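For instance, with Conan 1.x you could keep the installation files in a hypothetical `./.conan` subdirectory (the directory name is arbitrary; remember to add the option to every `conan install` command):
```
conan install .. --install-folder ./.conan --build missing --settings build_type=Release
```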
2. Use conan to generate CMake files for every configuration you want to build:
```
conan install .. --output-folder . --build missing --settings build_type=Release
conan install .. --output-folder . --build missing --settings build_type=Debug
```
To build Debug, in the next step, be sure to set `-DCMAKE_BUILD_TYPE=Debug`.
For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`,
you only need to run this command once.
For a multi-configuration generator, e.g. `Visual Studio`, you may want to
run it more than once.
Each of these commands should also have a different `build_type` setting.
A second command with the same `build_type` setting will overwrite the files
generated by the first. You can pass the build type on the command line with
`--settings build_type=$BUILD_TYPE` or in the profile itself,
under the section `[settings]` with the key `build_type`.
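For example, with Conan 1.x you can record the build type in the profile instead of passing it on every command line:
```
conan profile update settings.build_type=Debug default
```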
If you are using a Microsoft Visual C++ compiler,
then you will need to ensure consistency between the `build_type` setting
and the `compiler.runtime` setting.
When `build_type` is `Release`, `compiler.runtime` should be `MT`.
When `build_type` is `Debug`, `compiler.runtime` should be `MTd`.
```
conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT
conan install .. --output-folder . --build missing --settings build_type=Debug --settings compiler.runtime=MTd
```
3. Configure CMake and pass the toolchain file generated by Conan, located at
`$OUTPUT_FOLDER/build/generators/conan_toolchain.cmake`.
Single-config generators:
Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type]
and make sure it matches the one of the `build_type` settings
you chose in the previous step.
For example, to build Debug, in the next command, replace "Release" with "Debug".
```
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dxrpld=ON -Dtests=ON ..
```
Multi-config generators:
```
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -Dxrpld=ON -Dtests=ON ..
```
**Note:** You can pass build options for `rippled` in this step.
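For instance, a hypothetical Release configuration with assertions enabled (the `assert` option is described in the Options table below) might look like:
```
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release -Dassert=ON -Dxrpld=ON -Dtests=ON ..
```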
4. Build `rippled`.
For a single-configuration generator, it will build whatever configuration
you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator,
you must pass the option `--config` to select the build configuration.
Single-config generators:
```
cmake --build . -j $(nproc)
```
Multi-config generators:
```
cmake --build . --config Release
cmake --build . --config Debug
```
5. Test rippled.
Single-config generators:
```
./rippled --unittest
```
Multi-config generators:
```
./Release/rippled --unittest
./Debug/rippled --unittest
```
The location of `rippled` in your build directory depends on your CMake
generator. Pass `--help` to see the rest of the command line options.
## Coverage report
The coverage report is intended for developers using the GCC or Clang compilers
(including Apple Clang). It is generated by the build target `coverage`,
which is only enabled when the `coverage` option is set, e.g. with
`--options coverage=True` in `conan` or the `-Dcoverage=ON` variable in `cmake`.
Prerequisites for the coverage report:
- [gcovr tool][gcovr] (can be installed e.g. with [pip][python-pip])
- `gcov` for GCC (installed with the compiler by default) or
- `llvm-cov` for Clang (installed with the compiler by default)
- `Debug` build type
A coverage report is created when the following steps are completed, in order:
1. `rippled` binary built with instrumentation data, enabled by the `coverage`
option mentioned above
2. completed run of unit tests, which populates coverage capture data
3. completed run of the `gcovr` tool (which internally invokes either `gcov` or `llvm-cov`)
to assemble both instrumentation data and the coverage capture data into a coverage report
The above steps are automated into a single target `coverage`. The instrumented
`rippled` binary can also be used for regular development or testing work, at
the cost of extra disk space utilization and a small performance hit
(to store coverage capture). In case of a spurious failure of unit tests, it is
possible to re-run the `coverage` target without rebuilding the `rippled` binary
(since it is simply a dependency of the coverage report target). It is also possible
to select only specific tests for the purpose of the coverage report, by setting
the `coverage_test` variable in `cmake`.
The default coverage report format is `html-details`, but the user
can override it to any of the formats listed in `Builds/CMake/CodeCoverage.cmake`
by setting the `coverage_format` variable in `cmake`. It is also possible
to generate more than one format at a time by setting the `coverage_extra_args`
variable in `cmake`. The specific command line used to run the `gcovr` tool will be
displayed if the `CODE_COVERAGE_VERBOSE` variable is set.
By default, the code coverage tool runs parallel unit tests with `--unittest-jobs`
set to the number of available CPU cores. This may cause spurious test
errors on Apple. Developers can override the number of unit test jobs with
the `coverage_test_parallelism` variable in `cmake`.
Example use with some cmake variables set:
```
cd .build
conan install .. --output-folder . --build missing --settings build_type=Debug
cmake -DCMAKE_BUILD_TYPE=Debug -Dcoverage=ON -Dxrpld=ON -Dtests=ON -Dcoverage_test_parallelism=2 -Dcoverage_format=html-details -Dcoverage_extra_args="--json coverage.json" -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ..
cmake --build . --target coverage
```
After the `coverage` target is completed, the generated coverage report will be
stored inside the build directory, as either of:
- file named `coverage.`_extension_ , with a suitable extension for the report format, or
- directory named `coverage`, with the `index.html` and other files inside, for the `html-details` or `html-nested` report formats.
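To view an HTML report, open its index file in a browser, for example (assuming a Linux desktop where `xdg-open` is available):
```
xdg-open coverage/index.html
```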
## Options
| Option | Default Value | Description |
| --- | ---| ---|
| `assert` | OFF | Enable assertions. |
| `coverage` | OFF | Prepare the coverage report. |
| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. |
| `tests` | OFF | Build tests. |
| `unity` | ON | Configure a unity build. |
| `xrpld` | OFF | Build the xrpld (`rippled`) application, and not just the libxrpl library. |
[Unity builds][5] may be faster for the first build
(at the cost of much more memory) since they concatenate sources into fewer
translation units. Non-unity builds may be faster for incremental builds,
and can be helpful for detecting `#include` omissions.
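As a sketch, a non-unity Debug configuration (often convenient while iterating on a few files) differs from the earlier configure command only in the `unity` option:
```
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Debug -Dunity=OFF -Dxrpld=ON -Dtests=ON ..
```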
## Troubleshooting
### Conan
After any updates or changes to dependencies, you may need to do the following:
1. Remove your build directory.
2. Remove the Conan cache:
```
rm -rf ~/.conan/data
```
3. Re-run [conan install](#build-and-test).
### 'protobuf/port_def.inc' file not found
If `cmake --build .` results in an error due to a missing protobuf file, then you might have generated CMake files for a different `build_type` than the `CMAKE_BUILD_TYPE` you passed to conan.
```
/rippled/.build/pb-xrpl.libpb/xrpl/proto/ripple.pb.h:10:10: fatal error: 'google/protobuf/port_def.inc' file not found
10 | #include <google/protobuf/port_def.inc>
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1 error generated.
```
For example, if you want to build Debug:
1. For conan install, pass `--settings build_type=Debug`
2. For cmake, pass `-DCMAKE_BUILD_TYPE=Debug`
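Concretely, a consistent Debug sequence mirrors the commands from the Build and Test steps above:
```
conan install .. --output-folder . --build missing --settings build_type=Debug
cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Debug -Dxrpld=ON -Dtests=ON ..
```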
### no std::result_of
If your compiler version is recent enough to have removed `std::result_of` as
part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor
definition to your build.
```
conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default
conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default
```
### call to 'async_teardown' is ambiguous
If you are compiling with an early version of Clang 16, then you might hit
a [regression][6] when compiling C++20 that manifests as an [error in a Boost
header][7]. You can workaround it by adding this preprocessor definition:
```
conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_DISABLE_CONCEPTS"' default
conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_DISABLE_CONCEPTS"]' default
```
### recompile with -fPIC
If you get a linker error suggesting that you recompile Boost with
position-independent code, such as:
```
/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/.../lib/libboost_container.a(alloc_lib.o):
requires unsupported dynamic reloc 11; recompile with -fPIC
```
Conan most likely downloaded a bad binary distribution of the dependency.
This seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled with GCC
for Linux. The solution is to build the dependency locally by passing
`--build boost` when calling `conan install`.
```
conan install --build boost ...
```
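For example, grafting that flag onto the Release install command used earlier (keep any other settings you normally pass):
```
conan install .. --output-folder . --build missing --build boost --settings build_type=Release
```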
## Add a Dependency
If you want to experiment with a new package, follow these steps:
1. Search for the package on [Conan Center](https://conan.io/center/).
2. Modify [`conanfile.py`](./conanfile.py):
- Add a version of the package to the `requires` property.
- Change any default options for the package by adding them to the
`default_options` property (with syntax `'$package:$option': $value`).
3. Modify [`CMakeLists.txt`](./CMakeLists.txt):
- Add a call to `find_package($package REQUIRED)`.
- Link a library from the package to the target `ripple_libs`
(search for the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`).
4. Start coding! Don't forget to include whatever headers you need from the package.
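As a sketch, suppose you wanted to try a hypothetical package `zlib/1.3.1` from Conan Center (the package name, version, and the `ZLIB::ZLIB` CMake target are illustrative; check the package page for the real target name):
```
# conanfile.py -- add the package to the requires property:
#     'zlib/1.3.1',
# CMakeLists.txt -- find the package and link it into ripple_libs:
#     find_package(ZLIB REQUIRED)
#     target_link_libraries(ripple_libs INTERFACE ZLIB::ZLIB)
```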
[1]: https://github.com/conan-io/conan-center-index/issues/13168
[2]: https://en.cppreference.com/w/cpp/compiler_support/20
[3]: https://docs.conan.io/en/latest/getting_started.html
[5]: https://en.wikipedia.org/wiki/Unity_build
[6]: https://github.com/boostorg/beast/issues/2648
[7]: https://github.com/boostorg/beast/issues/2661
[gcovr]: https://gcovr.com/en/stable/getting-started.html
[python-pip]: https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/
[build_type]: https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html
[profile]: https://docs.conan.io/en/latest/reference/profiles.html

Builds/ArchLinux/PKGBUILD
@@ -0,0 +1,38 @@
# Maintainer: Roberto Catini <roberto.catini@gmail.com>

pkgname=rippled
pkgrel=1
pkgver=0
pkgdesc="Ripple peer-to-peer network daemon"
arch=('i686' 'x86_64')
url="https://github.com/ripple/rippled"
license=('custom:ISC')
depends=('protobuf' 'openssl' 'boost-libs')
makedepends=('git' 'scons' 'boost')
backup=("etc/$pkgname/rippled.cfg")
source=("git://github.com/ripple/rippled.git#branch=master")
sha512sums=('SKIP')

pkgver() {
  cd "$srcdir/$pkgname"
  git describe --long --tags | sed -r 's/([^-]*-g)/r\1/;s/-/./g'
}

build() {
  cd "$srcdir/$pkgname"
  scons
}

check() {
  cd "$srcdir/$pkgname"
  build/rippled --unittest
}

package() {
  cd "$srcdir/$pkgname"
  install -D -m644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
  install -D build/rippled "$pkgdir/usr/bin/rippled"
  install -D -m644 doc/rippled-example.cfg "$pkgdir/etc/$pkgname/rippled.cfg"
  mkdir -p "$pkgdir/var/lib/$pkgname/db"
  mkdir -p "$pkgdir/var/log/$pkgname"
}

@@ -0,0 +1,658 @@
# This is a set of common functions and settings for rippled
# and derived products.
############################################################
cmake_minimum_required(VERSION 3.1.0)
if("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
message(WARNING "Builds are strongly discouraged in "
"${CMAKE_SOURCE_DIR}.")
endif()
macro(parse_target)
if (NOT target AND NOT CMAKE_BUILD_TYPE)
if (APPLE)
set(target clang.debug)
elseif(WIN32)
set(target msvc)
else()
set(target gcc.debug)
endif()
endif()
if (target)
# Parse the target
set(remaining ${target})
while (remaining)
# get the component up to the next dot or end
string(REGEX REPLACE "^\\.?([^\\.]+).*$" "\\1" cur_component ${remaining})
string(REGEX REPLACE "^\\.?[^\\.]+(.*$)" "\\1" remaining ${remaining})
if (${cur_component} STREQUAL gcc)
if (DEFINED ENV{GNU_CC})
set(CMAKE_C_COMPILER $ENV{GNU_CC})
elseif ($ENV{CC} MATCHES .*gcc.*)
set(CMAKE_C_COMPILER $ENV{CC}) # use the C compiler from the environment
else()
find_program(CMAKE_C_COMPILER gcc)
endif()
if (DEFINED ENV{GNU_CXX})
set(CMAKE_CXX_COMPILER $ENV{GNU_CXX})
elseif ($ENV{CXX} MATCHES .*g\\+\\+.*)
set(CMAKE_CXX_COMPILER $ENV{CXX}) # use the C++ compiler from the environment
else()
find_program(CMAKE_CXX_COMPILER g++)
endif()
endif()
if (${cur_component} STREQUAL clang)
if (DEFINED ENV{CLANG_CC})
set(CMAKE_C_COMPILER $ENV{CLANG_CC})
elseif ($ENV{CC} MATCHES .*clang.*)
set(CMAKE_C_COMPILER $ENV{CC}) # use the C compiler from the environment
else()
find_program(CMAKE_C_COMPILER clang)
endif()
if (DEFINED ENV{CLANG_CXX})
set(CMAKE_CXX_COMPILER $ENV{CLANG_CXX})
elseif ($ENV{CXX} MATCHES .*clang.*)
set(CMAKE_CXX_COMPILER $ENV{CXX}) # use the C++ compiler from the environment
else()
find_program(CMAKE_CXX_COMPILER clang++)
endif()
endif()
if (${cur_component} STREQUAL msvc)
# TBD
endif()
if (${cur_component} STREQUAL unity)
set(unity true)
set(nonunity false)
endif()
if (${cur_component} STREQUAL nounity)
set(unity false)
set(nonunity true)
endif()
if (${cur_component} STREQUAL debug)
set(release false)
endif()
if (${cur_component} STREQUAL release)
set(release true)
endif()
if (${cur_component} STREQUAL coverage)
set(coverage true)
set(debug true)
endif()
if (${cur_component} STREQUAL profile)
set(profile true)
endif()
if (${cur_component} STREQUAL ci)
# Workarounds that make various CI builds work, but that
# we don't want in the general case.
set(ci true)
set(openssl_min 1.0.1)
endif()
endwhile()
if (release)
set(CMAKE_BUILD_TYPE Release)
else()
set(CMAKE_BUILD_TYPE Debug)
endif()
if (NOT unity)
set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE}Classic)
endif()
endif()
endmacro()
############################################################
macro(setup_build_cache)
set(san "" CACHE STRING "On gcc & clang, add sanitizer
instrumentation")
set_property(CACHE san PROPERTY STRINGS ";address;thread")
set(assert false CACHE BOOL "Enables asserts, even in release builds")
set(static false CACHE BOOL
"On linux, link protobuf, openssl, libc++, and boost statically")
if (static AND (WIN32 OR APPLE))
message(FATAL_ERROR "Static linking is only supported on linux.")
endif()
if (${CMAKE_GENERATOR} STREQUAL "Unix Makefiles" AND NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Debug)
endif()
# Can't exclude files from configurations, so can't support both
# unity and nonunity configurations at the same time
if (NOT DEFINED unity OR unity)
set(CMAKE_CONFIGURATION_TYPES
Debug
Release)
else()
set(CMAKE_CONFIGURATION_TYPES
DebugClassic
ReleaseClassic)
if(${CMAKE_BUILD_TYPE} STREQUAL "Debug")
set(CMAKE_BUILD_TYPE DebugClassic)
elseif(${CMAKE_BUILD_TYPE} STREQUAL "Release")
set(CMAKE_BUILD_TYPE ReleaseClassic)
endif()
endif()
set(CMAKE_CONFIGURATION_TYPES
${CMAKE_CONFIGURATION_TYPES} CACHE STRING "" FORCE)
endmacro()
############################################################
function(prepend var prefix)
set(listVar "")
foreach(f ${ARGN})
list(APPEND listVar "${prefix}${f}")
endforeach(f)
set(${var} "${listVar}" PARENT_SCOPE)
endfunction()
macro(append_flags name)
foreach (arg ${ARGN})
set(${name} "${${name}} ${arg}")
endforeach()
endmacro()
macro(group_sources curdir)
file(GLOB children RELATIVE ${PROJECT_SOURCE_DIR}/${curdir}
${PROJECT_SOURCE_DIR}/${curdir}/*)
foreach (child ${children})
if (IS_DIRECTORY ${PROJECT_SOURCE_DIR}/${curdir}/${child})
group_sources(${curdir}/${child})
else()
string(REPLACE "/" "\\" groupname ${curdir})
source_group(${groupname} FILES
${PROJECT_SOURCE_DIR}/${curdir}/${child})
endif()
endforeach()
endmacro()
macro(add_with_props src_var files)
list(APPEND ${src_var} ${files})
foreach (arg ${ARGN})
set(props "${props} ${arg}")
endforeach()
set_source_files_properties(
${files}
PROPERTIES COMPILE_FLAGS
${props})
endmacro()
############################################################
macro(determine_build_type)
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES ".*Clang") # both Clang and AppleClang
set(is_clang true)
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
set(is_gcc true)
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
set(is_msvc true)
endif()
if (${CMAKE_GENERATOR} STREQUAL "Xcode")
set(is_xcode true)
else()
set(is_xcode false)
endif()
if (NOT is_gcc AND NOT is_clang AND NOT is_msvc)
message("Current compiler is ${CMAKE_CXX_COMPILER_ID}")
message(FATAL_ERROR "Missing compiler. Must be GNU, Clang, or MSVC")
endif()
endmacro()
############################################################
macro(check_gcc4_abi)
# Check if should use gcc4's ABI
set(gcc4_abi false)
if ($ENV{RIPPLED_OLD_GCC_ABI})
set(gcc4_abi true)
endif()
if (is_gcc AND NOT gcc4_abi)
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 5)
execute_process(COMMAND lsb_release -si OUTPUT_VARIABLE lsb)
string(STRIP "${lsb}" lsb)
if ("${lsb}" STREQUAL "Ubuntu")
execute_process(COMMAND lsb_release -sr OUTPUT_VARIABLE lsb)
string(STRIP ${lsb} lsb)
if (${lsb} VERSION_LESS 15.1)
set(gcc4_abi true)
endif()
endif()
endif()
endif()
if (gcc4_abi)
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
endif()
endmacro()
############################################################
macro(special_build_flags)
if (coverage)
add_compile_options(-fprofile-arcs -ftest-coverage)
append_flags(CMAKE_EXE_LINKER_FLAGS -fprofile-arcs -ftest-coverage)
endif()
if (profile)
add_compile_options(-p -pg)
append_flags(CMAKE_EXE_LINKER_FLAGS -p -pg)
endif()
endmacro()
############################################################
# Params: Boost components to search for.
macro(use_boost)
if ((NOT DEFINED BOOST_ROOT) AND (DEFINED ENV{BOOST_ROOT}))
set(BOOST_ROOT $ENV{BOOST_ROOT})
endif()
if(WIN32 OR CYGWIN)
# Workaround for MSVC having two boost versions - x86 and x64 on same PC in stage folders
if(DEFINED BOOST_ROOT)
if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND IS_DIRECTORY ${BOOST_ROOT}/stage64/lib)
set(Boost_LIBRARY_DIR ${BOOST_ROOT}/stage64/lib)
else()
set(Boost_LIBRARY_DIR ${BOOST_ROOT}/stage/lib)
endif()
endif()
endif()
if (is_clang AND DEFINED ENV{CLANG_BOOST_ROOT})
set(BOOST_ROOT $ENV{CLANG_BOOST_ROOT})
endif()
set(Boost_USE_STATIC_LIBS on)
set(Boost_USE_MULTITHREADED on)
set(Boost_USE_STATIC_RUNTIME off)
find_package(Boost COMPONENTS
${ARGN})
if (Boost_FOUND OR
((CYGWIN OR WIN32) AND Boost_INCLUDE_DIRS AND Boost_LIBRARY_DIRS))
if(NOT Boost_FOUND)
message(WARNING "Boost directory found, but not all components. May not be able to build.")
endif()
include_directories(SYSTEM ${Boost_INCLUDE_DIRS})
link_directories(${Boost_LIBRARY_DIRS})
else()
message(FATAL_ERROR "Boost not found")
endif()
endmacro()
macro(use_pthread)
if (NOT WIN32)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads)
add_compile_options(${CMAKE_THREAD_LIBS_INIT})
endif()
endmacro()
macro(use_openssl openssl_min)
if (APPLE AND NOT DEFINED ENV{OPENSSL_ROOT_DIR})
find_program(HOMEBREW brew)
if (NOT HOMEBREW STREQUAL "HOMEBREW-NOTFOUND")
execute_process(COMMAND brew --prefix openssl
OUTPUT_VARIABLE OPENSSL_ROOT_DIR
OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
endif()
if (WIN32)
if (DEFINED ENV{OPENSSL_ROOT})
include_directories($ENV{OPENSSL_ROOT}/include)
link_directories($ENV{OPENSSL_ROOT}/lib)
endif()
else()
if (static)
set(tmp ${CMAKE_FIND_LIBRARY_SUFFIXES}) # save the suffix list so it can be restored
set(CMAKE_FIND_LIBRARY_SUFFIXES .a) # only consider static archives while searching
endif()
find_package(OpenSSL)
if (static)
set(CMAKE_FIND_LIBRARY_SUFFIXES ${tmp}) # restore the saved suffix list
endif()
if (OPENSSL_FOUND)
include_directories(${OPENSSL_INCLUDE_DIR})
else()
message(FATAL_ERROR "OpenSSL not found")
endif()
if (UNIX AND NOT APPLE AND ${OPENSSL_VERSION} VERSION_LESS ${openssl_min})
message(FATAL_ERROR
"Your openssl is Version: ${OPENSSL_VERSION}, ${openssl_min} or better is required.")
endif()
endif()
endmacro()
macro(use_protobuf)
if (WIN32)
if (DEFINED ENV{PROTOBUF_ROOT})
include_directories($ENV{PROTOBUF_ROOT}/src)
link_directories($ENV{PROTOBUF_ROOT}/src/.libs)
endif()
# Modified from FindProtobuf.cmake
FUNCTION(PROTOBUF_GENERATE_CPP SRCS HDRS PROTOFILES)
# argument parsing
IF(NOT PROTOFILES)
MESSAGE(SEND_ERROR "Error: PROTOBUF_GENERATE_CPP() called without any proto files")
RETURN()
ENDIF()
SET(OUTPATH ${CMAKE_CURRENT_BINARY_DIR})
SET(PROTOROOT ${CMAKE_CURRENT_SOURCE_DIR})
# the real logic
SET(${SRCS})
SET(${HDRS})
FOREACH(PROTOFILE ${PROTOFILES})
# ensure that the file ends with .proto
STRING(REGEX MATCH "\\.proto$$" PROTOEND ${PROTOFILE})
IF(NOT PROTOEND)
MESSAGE(SEND_ERROR "Proto file '${PROTOFILE}' does not end with .proto")
ENDIF()
GET_FILENAME_COMPONENT(PROTO_PATH ${PROTOFILE} PATH)
GET_FILENAME_COMPONENT(ABS_FILE ${PROTOFILE} ABSOLUTE)
GET_FILENAME_COMPONENT(FILE_WE ${PROTOFILE} NAME_WE)
STRING(REGEX MATCH "^${PROTOROOT}" IN_ROOT_PATH ${PROTOFILE})
STRING(REGEX MATCH "^${PROTOROOT}" IN_ROOT_ABS_FILE ${ABS_FILE})
IF(IN_ROOT_PATH)
SET(MATCH_PATH ${PROTOFILE})
ELSEIF(IN_ROOT_ABS_FILE)
SET(MATCH_PATH ${ABS_FILE})
ELSE()
MESSAGE(SEND_ERROR "Proto file '${PROTOFILE}' is not in protoroot '${PROTOROOT}'")
ENDIF()
# build the result file name
STRING(REGEX REPLACE "^${PROTOROOT}(/?)" "" ROOT_CLEANED_FILE ${MATCH_PATH})
STRING(REGEX REPLACE "\\.proto$$" "" EXT_CLEANED_FILE ${ROOT_CLEANED_FILE})
SET(CPP_FILE "${OUTPATH}/${EXT_CLEANED_FILE}.pb.cc")
SET(H_FILE "${OUTPATH}/${EXT_CLEANED_FILE}.pb.h")
LIST(APPEND ${SRCS} "${CPP_FILE}")
LIST(APPEND ${HDRS} "${H_FILE}")
ADD_CUSTOM_COMMAND(
OUTPUT "${CPP_FILE}" "${H_FILE}"
COMMAND ${CMAKE_COMMAND} -E make_directory ${OUTPATH}
COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
ARGS "--cpp_out=${OUTPATH}" --proto_path "${PROTOROOT}" "${MATCH_PATH}"
DEPENDS ${ABS_FILE}
COMMENT "Running C++ protocol buffer compiler on ${MATCH_PATH} with root ${PROTOROOT}, generating: ${CPP_FILE}"
VERBATIM)
ENDFOREACH()
SET_SOURCE_FILES_PROPERTIES(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
SET(${SRCS} ${${SRCS}} PARENT_SCOPE)
SET(${HDRS} ${${HDRS}} PARENT_SCOPE)
ENDFUNCTION()
set(PROTOBUF_PROTOC_EXECUTABLE Protoc) # must be on path
else()
if (static)
set(tmp ${CMAKE_FIND_LIBRARY_SUFFIXES}) # save the suffix list so it can be restored
set(CMAKE_FIND_LIBRARY_SUFFIXES .a) # only consider static archives while searching
endif()
find_package(Protobuf REQUIRED)
if (static)
set(CMAKE_FIND_LIBRARY_SUFFIXES ${tmp}) # restore the saved suffix list
endif()
if (is_clang AND DEFINED ENV{CLANG_PROTOBUF_ROOT})
link_directories($ENV{CLANG_PROTOBUF_ROOT}/src/.libs)
include_directories($ENV{CLANG_PROTOBUF_ROOT}/src)
else()
include_directories(${PROTOBUF_INCLUDE_DIRS})
endif()
endif()
include_directories(${CMAKE_CURRENT_BINARY_DIR})
file(GLOB ripple_proto src/ripple/proto/*.proto)
PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${ripple_proto})
if (WIN32)
include_directories(src/protobuf/src
src/protobuf/vsprojects
${CMAKE_CURRENT_BINARY_DIR}/src/ripple/proto)
endif()
endmacro()
############################################################
macro(setup_build_boilerplate)
if (NOT WIN32 AND san)
add_compile_options(-fsanitize=${san} -fno-omit-frame-pointer)
append_flags(CMAKE_EXE_LINKER_FLAGS
-fsanitize=${san})
string(TOLOWER ${san} ci_san)
if (${ci_san} STREQUAL address)
set(SANITIZER_LIBRARIES asan)
add_definitions(-DSANITIZER=ASAN)
endif()
if (${ci_san} STREQUAL thread)
set(SANITIZER_LIBRARIES tsan)
add_definitions(-DSANITIZER=TSAN)
endif()
endif()
############################################################
add_definitions(
-DOPENSSL_NO_SSL2
-DDEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
-DHAVE_USLEEP=1
-DSOCI_CXX_C11=1
-D_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS
-DBOOST_NO_AUTO_PTR
)
if (is_gcc)
add_compile_options(-Wno-unused-but-set-variable -Wno-deprecated)
endif()
# Generator expressions are not supported in add_definitions, use set_property instead
set_property(
DIRECTORY
APPEND
PROPERTY COMPILE_DEFINITIONS
$<$<OR:$<CONFIG:Debug>,$<CONFIG:DebugClassic>>:DEBUG _DEBUG>)
if (NOT assert)
set_property(
DIRECTORY
APPEND
PROPERTY COMPILE_DEFINITIONS
$<$<OR:$<BOOL:${profile}>,$<CONFIG:Release>,$<CONFIG:ReleaseClassic>>:NDEBUG>)
endif()
if (NOT WIN32)
add_definitions(-D_FILE_OFFSET_BITS=64)
append_flags(CMAKE_CXX_FLAGS -frtti -std=c++14 -Wno-invalid-offsetof
-DBOOST_COROUTINE_NO_DEPRECATION_WARNING -DBOOST_COROUTINES_NO_DEPRECATION_WARNING)
add_compile_options(-Wall -Wno-sign-compare -Wno-char-subscripts -Wno-format
-Wno-unused-local-typedefs -g)
# There seems to be an issue using generator expressions with multiple values,
# split the expression
add_compile_options($<$<OR:$<CONFIG:Release>,$<CONFIG:ReleaseClassic>>:-O3>)
add_compile_options($<$<OR:$<CONFIG:Release>,$<CONFIG:ReleaseClassic>>:-fno-strict-aliasing>)
append_flags(CMAKE_EXE_LINKER_FLAGS -rdynamic -g)
if (is_clang)
add_compile_options(
-Wno-redeclared-class-member -Wno-mismatched-tags -Wno-deprecated-register)
add_definitions(-DBOOST_ASIO_HAS_STD_ARRAY)
endif()
if (APPLE)
add_definitions(-DBEAST_COMPILE_OBJECTIVE_CPP=1
-DNO_LOG_UNHANDLED_EXCEPTIONS)
add_compile_options(
-Wno-deprecated -Wno-deprecated-declarations -Wno-unused-variable -Wno-unused-function)
endif()
if (is_gcc)
add_compile_options(-Wno-unused-but-set-variable -Wno-unused-local-typedefs)
add_compile_options($<$<OR:$<CONFIG:Debug>,$<CONFIG:DebugClassic>>:-O0>)
endif (is_gcc)
else(NOT WIN32)
add_compile_options(
/bigobj # Increase object file max size
/EHa # ExceptionHandling all
/fp:precise # Floating point behavior
/Gd # __cdecl calling convention
/Gm- # Minimal rebuild: disabled
/GR # Enable RTTI
/Gy- # Function level linking: disabled
/FS
/MP # Multiprocessor compilation
/openmp- # pragma omp: disabled
/Zc:forScope # Language extension: for scope
/Zi # Generate complete debug info
/errorReport:none # No error reporting to Internet
/nologo # Suppress startup banner
/W3 # Warning level 3
/WX- # Disable warnings as errors
/wd"4018"
/wd"4244"
/wd"4267"
/wd"4800" # Disable C4800(int to bool performance)
/wd"4503" # Decorated name length exceeded, name was truncated
)
add_definitions(
-D_WIN32_WINNT=0x6000
-D_SCL_SECURE_NO_WARNINGS
-D_CRT_SECURE_NO_WARNINGS
-DWIN32_CONSOLE
-DNOMINMAX
-DBOOST_COROUTINE_NO_DEPRECATION_WARNING
-DBOOST_COROUTINES_NO_DEPRECATION_WARNING)
append_flags(CMAKE_EXE_LINKER_FLAGS
/DEBUG
/DYNAMICBASE
/ERRORREPORT:NONE
/MACHINE:X64
/MANIFEST
/nologo
/NXCOMPAT
/SUBSYSTEM:CONSOLE
/TLBID:1)
# There seems to be an issue using generator expressions with multiple values,
# split the expression
# /GS Buffers security check: enable
add_compile_options($<$<OR:$<CONFIG:Debug>,$<CONFIG:DebugClassic>>:/GS>)
# /MTd Language: Multi-threaded Debug CRT
add_compile_options($<$<OR:$<CONFIG:Debug>,$<CONFIG:DebugClassic>>:/MTd>)
# /Od Optimization: Disabled
add_compile_options($<$<OR:$<CONFIG:Debug>,$<CONFIG:DebugClassic>>:/Od>)
# /RTC1 Run-time error checks:
add_compile_options($<$<OR:$<CONFIG:Debug>,$<CONFIG:DebugClassic>>:/RTC1>)
# Generator expressions are not supported in add_definitions, use set_property instead
set_property(
DIRECTORY
APPEND
PROPERTY COMPILE_DEFINITIONS
$<$<OR:$<CONFIG:Debug>,$<CONFIG:DebugClassic>>:_CRTDBG_MAP_ALLOC>)
# /MT Language: Multi-threaded CRT
add_compile_options($<$<OR:$<CONFIG:Release>,$<CONFIG:ReleaseClassic>>:/MT>)
add_compile_options($<$<OR:$<CONFIG:Release>,$<CONFIG:ReleaseClassic>>:/Ox>)
# /Ox Optimization: Full
endif (NOT WIN32)
if (static)
append_flags(CMAKE_EXE_LINKER_FLAGS -static-libstdc++)
# set_target_properties(ripple-libpp PROPERTIES LINK_SEARCH_START_STATIC 1)
# set_target_properties(ripple-libpp PROPERTIES LINK_SEARCH_END_STATIC 1)
endif()
endmacro()
############################################################
macro(create_build_folder cur_project)
if (NOT WIN32)
ADD_CUSTOM_TARGET(build_folder ALL
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Creating build output folder")
add_dependencies(${cur_project} build_folder)
endif()
endmacro()
macro(set_startup_project cur_project)
if (WIN32 AND NOT ci)
if (CMAKE_VERSION VERSION_LESS 3.6)
message(WARNING
"Setting the VS startup project requires cmake 3.6 or later. Please upgrade.")
endif()
set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY
VS_STARTUP_PROJECT ${cur_project})
endif()
endmacro()
macro(link_common_libraries cur_project)
if (NOT MSVC)
target_link_libraries(${cur_project} ${Boost_LIBRARIES})
target_link_libraries(${cur_project} dl)
target_link_libraries(${cur_project} Threads::Threads)
if (APPLE)
find_library(app_kit AppKit)
find_library(foundation Foundation)
target_link_libraries(${cur_project}
crypto ssl ${app_kit} ${foundation})
else()
target_link_libraries(${cur_project} rt)
endif()
else(NOT MSVC)
target_link_libraries(${cur_project}
$<$<OR:$<CONFIG:Debug>,$<CONFIG:DebugClassic>>:VC/static/ssleay32MTd>
$<$<OR:$<CONFIG:Debug>,$<CONFIG:DebugClassic>>:VC/static/libeay32MTd>)
target_link_libraries(${cur_project}
$<$<OR:$<CONFIG:Release>,$<CONFIG:ReleaseClassic>>:VC/static/ssleay32MT>
$<$<OR:$<CONFIG:Release>,$<CONFIG:ReleaseClassic>>:VC/static/libeay32MT>)
target_link_libraries(${cur_project}
legacy_stdio_definitions.lib Shlwapi kernel32 user32 gdi32 winspool comdlg32
advapi32 shell32 ole32 oleaut32 uuid odbc32 odbccp32)
endif (NOT MSVC)
endmacro()

Builds/Docker/Dockerfile
@@ -0,0 +1,30 @@
# rippled
# use the ubuntu base image
FROM ubuntu
MAINTAINER Roberto Catini roberto.catini@gmail.com
# make sure the package repository is up to date
RUN apt-get update
RUN apt-get -y upgrade
# install the dependencies
RUN apt-get -y install git scons pkg-config protobuf-compiler libprotobuf-dev libssl-dev libboost1.55-all-dev
# download source code from official repository
RUN git clone https://github.com/ripple/rippled.git src; cd src/; git checkout master
# compile
RUN cd src/; scons build/rippled
# move to root directory and strip
RUN cp src/build/rippled rippled; strip rippled
# copy default config
RUN cp src/doc/rippled-example.cfg rippled.cfg
# clean source
RUN rm -r src
# launch rippled when launching the container
ENTRYPOINT ./rippled

@@ -0,0 +1,23 @@
FROM ubuntu
MAINTAINER Torrie Fischer <torrie@ripple.com>
RUN apt-get update -qq &&\
apt-get install -qq software-properties-common &&\
apt-add-repository -y ppa:ubuntu-toolchain-r/test &&\
apt-add-repository -y ppa:afrank/boost &&\
apt-get update -qq
RUN apt-get purge -qq libboost1.48-dev &&\
apt-get install -qq libprotobuf8 libboost1.57-all-dev
RUN mkdir -p /srv/rippled/data
VOLUME /srv/rippled/data/
ENTRYPOINT ["/srv/rippled/bin/rippled"]
CMD ["--conf", "/srv/rippled/data/rippled.cfg"]
EXPOSE 51235/udp
EXPOSE 5005/tcp
ADD ./rippled.cfg /srv/rippled/data/rippled.cfg
ADD ./rippled /srv/rippled/bin/

Builds/Docker/build-ci.sh
@@ -0,0 +1,13 @@
#!/bin/bash
set -e
mkdir -p build/docker/
cp doc/rippled-example.cfg build/clang.debug/rippled build/docker/
cp Builds/Docker/Dockerfile-testnet build/docker/Dockerfile
mv build/docker/rippled-example.cfg build/docker/rippled.cfg
strip build/docker/rippled
docker build -t ripple/rippled:$CIRCLE_SHA1 build/docker/
docker tag ripple/rippled:$CIRCLE_SHA1 ripple/rippled:latest
if [ -z "$CIRCLE_PR_NUMBER" ]; then
docker tag ripple/rippled:$CIRCLE_SHA1 ripple/rippled:$CIRCLE_BRANCH
fi

Builds/Docker/push-to-hub.sh
@@ -0,0 +1,16 @@
#!/bin/bash
set -e
if [ -z "$DOCKER_EMAIL" -o -z "$DOCKER_USERNAME" -o -z "$DOCKER_PASSWORD" ];then
echo "Docker credentials are not set. Can't login to docker, no containers will be pushed."
exit 0
fi
if [ -n "$CIRCLE_PR_NUMBER" ]; then
echo "Not pushing results of a pull request build."
exit 0
fi
docker login -e $DOCKER_EMAIL -u $DOCKER_USERNAME -p $DOCKER_PASSWORD
docker push ripple/rippled:$CIRCLE_SHA1
docker push ripple/rippled:$CIRCLE_BRANCH
docker push ripple/rippled:latest

Builds/Eclipse/README.md
@@ -0,0 +1,31 @@
**Requirements**
1. Java Runtime Environment (JRE)
2. Eclipse with CDT (tested on Luna):
http://www.eclipse.org/downloads/packages/eclipse-ide-cc-developers/lunasr2
3. Eclipse SCons plugin: http://sconsolidator.com/
**WARNING**: by default the SCons plugin uses 16 threads. Go to
*Window->Preferences->SCons->Build Settings* in Eclipse and make it
use only 4-8 jobs (threads), or whatever you feel comfortable with. It will
positively freeze your system if you run with 16 threads/jobs.
![scons](scons.png)
**Getting Started**
After setting up Eclipse, just do a File->New->Other...
Select: C/C++ / New SCons project from existing source
Point the importer to the folder where the SConstruct resides (normally the
root folder of your git workspace).
**Build**
Just hit Project->Build All in Eclipse to get started. And remember not to
let it run 16 threads!
**Debug**
Start a new Eclipse debug configuration and set binary to run to build/rippled
(assuming you have built it).
![debug](debug.png)

Builds/Eclipse/debug.png (new binary file, 18 KiB; not shown)

Builds/Eclipse/scons.png (new binary file, 17 KiB; not shown)


@@ -0,0 +1,22 @@
#!/usr/bin/env bash
#
# This script installs the dependencies needed by rippled. It should be run
# with sudo.
#
if [ ! -f /etc/fedora-release ]; then
echo "This script is meant to be run on fedora"
exit 1
fi
fedora_release=$(grep -o '[0-9]*' /etc/fedora-release)
if (( $(bc <<< "${fedora_release} < 22") )); then
echo "This script is meant to run on fedora 22 or greater"
exit 1
fi
yum -y update
yum -y group install "Development Tools"
yum -y install gcc-c++ scons openssl-devel openssl-static protobuf-devel protobuf-static boost-devel boost-static libstdc++-static

Builds/QtCreator/.gitignore
@@ -0,0 +1,5 @@
# QTCreator
Makefile
*.user


@@ -0,0 +1,112 @@
# Ripple protocol buffers
PROTOS = ../../src/ripple_data/protocol/ripple.proto
PROTOS_DIR = ../../build/proto
# Google Protocol Buffers support
protobuf_h.name = protobuf header
protobuf_h.input = PROTOS
protobuf_h.output = $${PROTOS_DIR}/${QMAKE_FILE_BASE}.pb.h
protobuf_h.depends = ${QMAKE_FILE_NAME}
protobuf_h.commands = protoc --cpp_out=$${PROTOS_DIR} --proto_path=${QMAKE_FILE_PATH} ${QMAKE_FILE_NAME}
protobuf_h.variable_out = HEADERS
QMAKE_EXTRA_COMPILERS += protobuf_h
protobuf_cc.name = protobuf implementation
protobuf_cc.input = PROTOS
protobuf_cc.output = $${PROTOS_DIR}/${QMAKE_FILE_BASE}.pb.cc
protobuf_cc.depends = $${PROTOS_DIR}/${QMAKE_FILE_BASE}.pb.h
protobuf_cc.commands = $$escape_expand(\\n)
#protobuf_cc.variable_out = SOURCES
QMAKE_EXTRA_COMPILERS += protobuf_cc
# Ripple compilation
DESTDIR = ../../build/QtCreator
OBJECTS_DIR = ../../build/QtCreator/obj
TEMPLATE = app
CONFIG += console thread warn_off
CONFIG -= qt gui
DEFINES += _DEBUG
linux-g++:QMAKE_CXXFLAGS += \
-Wall \
-Wno-sign-compare \
-Wno-char-subscripts \
-Wno-invalid-offsetof \
-Wno-unused-parameter \
-Wformat \
-O0 \
-std=c++11 \
-pthread
INCLUDEPATH += \
"../../src/leveldb/" \
"../../src/leveldb/port" \
"../../src/leveldb/include" \
$${PROTOS_DIR}
OTHER_FILES += \
# $$files(../../src/*, true) \
# $$files(../../src/beast/*) \
# $$files(../../src/beast/modules/beast_basics/diagnostic/*)
# $$files(../../src/beast/modules/beast_core/, true)
UI_HEADERS_DIR += ../../src/ripple_basics
# ---------
# New style
#
SOURCES += \
../../src/ripple/beast/ripple_beast.unity.cpp \
../../src/ripple/beast/ripple_beastc.c \
../../src/ripple/common/ripple_common.unity.cpp \
../../src/ripple/http/ripple_http.unity.cpp \
../../src/ripple/json/ripple_json.unity.cpp \
../../src/ripple/peerfinder/ripple_peerfinder.unity.cpp \
../../src/ripple/radmap/ripple_radmap.unity.cpp \
../../src/ripple/resource/ripple_resource.unity.cpp \
../../src/ripple/sitefiles/ripple_sitefiles.unity.cpp \
../../src/ripple/sslutil/ripple_sslutil.unity.cpp \
../../src/ripple/testoverlay/ripple_testoverlay.unity.cpp \
../../src/ripple/types/ripple_types.unity.cpp \
../../src/ripple/validators/ripple_validators.unity.cpp
# ---------
# Old style
#
SOURCES += \
../../src/ripple_app/ripple_app.unity.cpp \
../../src/ripple_app/ripple_app_pt1.unity.cpp \
../../src/ripple_app/ripple_app_pt2.unity.cpp \
../../src/ripple_app/ripple_app_pt3.unity.cpp \
../../src/ripple_app/ripple_app_pt4.unity.cpp \
../../src/ripple_app/ripple_app_pt5.unity.cpp \
../../src/ripple_app/ripple_app_pt6.unity.cpp \
../../src/ripple_app/ripple_app_pt7.unity.cpp \
../../src/ripple_app/ripple_app_pt8.unity.cpp \
../../src/ripple_basics/ripple_basics.unity.cpp \
../../src/ripple_core/ripple_core.unity.cpp \
../../src/ripple_data/ripple_data.unity.cpp \
../../src/ripple_hyperleveldb/ripple_hyperleveldb.unity.cpp \
../../src/ripple_leveldb/ripple_leveldb.unity.cpp \
../../src/ripple_net/ripple_net.unity.cpp \
../../src/ripple_overlay/ripple_overlay.unity.cpp \
../../src/ripple_rpc/ripple_rpc.unity.cpp \
../../src/ripple_websocket/ripple_websocket.unity.cpp
LIBS += \
-lboost_date_time-mt\
-lboost_filesystem-mt \
-lboost_program_options-mt \
-lboost_regex-mt \
-lboost_system-mt \
-lboost_thread-mt \
-lboost_random-mt \
-lprotobuf \
-lssl \
-lrt

Builds/Test.py
@@ -0,0 +1,266 @@
#!/usr/bin/env python

# This file is part of rippled: https://github.com/ripple/rippled
# Copyright (c) 2012 - 2015 Ripple Labs Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""
Invocation:

  ./Builds/Test.py - builds and tests all configurations

The build must succeed without shell aliases for this to work.

To pass flags to scons, put them at the very end of the command line, after
the -- flag - like this:

  ./Builds/Test.py -- -j4   # Pass -j4 to scons.

Common problems:

1) Boost not found. Solution: export BOOST_ROOT=[path to boost folder]

2) OpenSSL not found. Solution: export OPENSSL_ROOT=[path to OpenSSL folder]

3) scons is an alias. Solution: Create a script named "scons" somewhere in
   your $PATH (eg. ~/bin/scons will often work).

      #!/bin/sh
      python /C/Python27/Scripts/scons.py "${@}"
"""
from __future__ import absolute_import, division, print_function, unicode_literals

import argparse
import itertools
import os
import platform
import re
import shutil
import sys
import subprocess


def powerset(iterable):
    """powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"""
    s = list(iterable)
    return itertools.chain.from_iterable(
        itertools.combinations(s, r) for r in range(len(s) + 1))

IS_WINDOWS = platform.system().lower() == 'windows'
IS_OS_X = platform.system().lower() == 'darwin'

if IS_WINDOWS or IS_OS_X:
    ALL_TARGETS = [('debug',), ('release',)]
else:
    ALL_TARGETS = [(cc + "." + target,)
                   for cc in ['gcc', 'clang']
                   for target in ['debug', 'release', 'coverage', 'profile',
                                  'debug.nounity', 'release.nounity',
                                  'coverage.nounity', 'profile.nounity']]

# list of tuples of all possible options
if IS_WINDOWS or IS_OS_X:
    ALL_OPTIONS = [tuple(x) for x in powerset(['--assert'])]
else:
    ALL_OPTIONS = list(set(
        [tuple(x) for x in powerset(
            ['--ninja', '--static', '--assert', '--sanitize=address'])] +
        [tuple(x) for x in powerset(
            ['--ninja', '--static', '--assert', '--sanitize=thread'])]))

# list of tuples of all possible options + all possible targets
ALL_BUILDS = [options + target
              for target in ALL_TARGETS
              for options in ALL_OPTIONS]

parser = argparse.ArgumentParser(
    description='Test.py - run ripple tests'
)

parser.add_argument(
    '--all', '-a',
    action='store_true',
    help='Build all configurations.',
)

parser.add_argument(
    '--keep_going', '-k',
    action='store_true',
    help='Keep going after one configuration has failed.',
)

parser.add_argument(
    '--silent', '-s',
    action='store_true',
    help='Silence all messages except errors',
)

parser.add_argument(
    '--verbose', '-v',
    action='store_true',
    help=('Report more information about which commands are executed and the '
          'results.'),
)

parser.add_argument(
    '--test', '-t',
    default='',
    help='Add a prefix for unit tests',
)

parser.add_argument(
    '--clean', '-c',
    action='store_true',
    help='delete all build artifacts after testing',
)

parser.add_argument(
    'scons_args',
    default=(),
    nargs='*'
)

ARGS = parser.parse_args()


def shell(cmd, args=(), silent=False):
    """Execute a shell command and return the output."""
    silent = ARGS.silent or silent
    verbose = not silent and ARGS.verbose
    if verbose:
        print('$' + cmd, *args)

    command = (cmd,) + args
    process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=IS_WINDOWS)
    lines = []
    count = 0
    for line in process.stdout:
        # Python 2 vs. Python 3
        if isinstance(line, str):
            decoded = line
        else:
            decoded = line.decode()
        lines.append(decoded)
        if verbose:
            print(decoded, end='')
        elif not silent:
            count += 1
            if count >= 80:
                print()
                count = 0
            else:
                print('.', end='')

    if not verbose and count:
        print()
    process.wait()
    return process.returncode, lines


def run_tests(args):
    failed = []
    if IS_WINDOWS:
        binary_re = re.compile(r'build\\([^\\]+)\\rippled.exe')
    else:
        binary_re = re.compile(r'build/([^/]+)/rippled')
    _, lines = shell('scons', ('-n', '--tree=derived',) + args, silent=True)
    for line in lines:
        match = binary_re.search(line)
        if match:
            executable, target = match.group(0, 1)

            print('Unit tests for', target)
            testflag = '--unittest'
            if ARGS.test:
                testflag += ('=' + ARGS.test)

            resultcode, lines = shell(executable, (testflag,))

            if resultcode:
                if not ARGS.verbose:
                    print('ERROR:', *lines, sep='')
                failed.append([target, 'unittest'])
                if not ARGS.keep_going:
                    break

    return failed


def run_build(args=None):
    print('Building:', *args or ('(default)',))
    resultcode, lines = shell('scons', args)

    if resultcode:
        print('Build FAILED:')
        if not ARGS.verbose:
            print(*lines, sep='')
        sys.exit(1)

    if '--ninja' in args:
        resultcode, lines = shell('ninja')

        if resultcode:
            print('Ninja build FAILED:')
            if not ARGS.verbose:
                print(*lines, sep='')
            sys.exit(1)


def main():
    if ARGS.all:
        to_build = ALL_BUILDS
    else:
        to_build = [tuple(ARGS.scons_args)]

    all_failed = []

    for build in to_build:
        args = ()
        # additional arguments come first
        for arg in list(ARGS.scons_args):
            if arg not in build:
                args += (arg,)
        args += build

        run_build(args)
        failed = run_tests(args)

        if failed:
            print('FAILED:', *(':'.join(f) for f in failed))
            if not ARGS.keep_going:
                sys.exit(1)
            else:
                all_failed.extend([','.join(build), ':'.join(f)]
                                  for f in failed)
        else:
            print('Success')

        if ARGS.clean:
            shutil.rmtree('build')
            if '--ninja' in args:
                os.remove('build.ninja')
                os.remove('.ninja_deps')
                os.remove('.ninja_log')

    if all_failed:
        if len(to_build) > 1:
            print()
            print('FAILED:', *(':'.join(f) for f in all_failed))
        sys.exit(1)


if __name__ == '__main__':
    main()
    sys.exit(0)


@@ -0,0 +1,82 @@
#!/usr/bin/env bash
#
# This script installs boost and protobuf built with clang. This is needed on
# ubuntu 15.10 when building with clang.
# It will build these in a 'clang' subdirectory that it creates below the directory
# this script is run from. If a clang directory already exists, the script will refuse
# to run.
if hash lsb_release 2>/dev/null; then
if [ $(lsb_release -si) == "Ubuntu" ]; then
ubuntu_release=$(lsb_release -sr)
fi
fi
if [ -z "${ubuntu_release}" ]; then
echo "System not supported"
exit 1
fi
if ! hash clang 2>/dev/null; then
clang_version=3.7
if [ ${ubuntu_release} == "16.04" ]; then
clang_version=3.8
fi
sudo apt-get -y install clang-${clang_version}
update-alternatives --install /usr/bin/clang clang /usr/bin/clang-${clang_version} 99 --slave /usr/bin/clang++ clang++ /usr/bin/clang++-${clang_version}
hash -r
if ! hash clang 2>/dev/null; then
echo "Please install clang"
exit 1
fi
fi
if [ ${ubuntu_release} != "16.04" ] && [ ${ubuntu_release} != "15.10" ]; then
echo "clang specific boost and protobuf not needed"
exit 0
fi
if [ -d clang ]; then
echo "clang directory already exists. Cowardly refusing to run"
exit 1
fi
if ! hash wget 2>/dev/null; then
sudo apt-get -y install wget
hash -r
if ! hash wget 2>/dev/null; then
echo "Please install wget"
exit 1
fi
fi
num_procs=$(lscpu -p | grep -v '^#' | sort -u -t, -k 2,4 | wc -l) # physical cores
mkdir clang
pushd clang > /dev/null
# Install protobuf
pb=protobuf-2.6.1
pb_tar=${pb}.tar.gz
wget -O ${pb_tar} https://github.com/google/protobuf/releases/download/v2.6.1/${pb_tar}
tar xf ${pb_tar}
rm ${pb_tar}
pushd ${pb} > /dev/null
./configure CC=clang CXX=clang++ CXXFLAGS='-std=c++14 -O3 -g'
make -j${num_procs}
popd > /dev/null
# Install boost
boost_ver=1.60.0
bd=boost_${boost_ver//./_}
bd_tar=${bd}.tar.gz
wget -O ${bd_tar} http://sourceforge.net/projects/boost/files/boost/${boost_ver}/${bd_tar}
tar xf ${bd_tar}
rm ${bd_tar}
pushd ${bd} > /dev/null
./bootstrap.sh
./b2 toolset=clang -j${num_procs}
popd > /dev/null
popd > /dev/null

Builds/Ubuntu/install_boost.sh
@@ -0,0 +1,39 @@
#!/usr/bin/env bash
#
# This script builds boost with the correct ABI flags for ubuntu
#
version=59
patch=0
if hash lsb_release 2>/dev/null; then
if [ $(lsb_release -si) == "Ubuntu" ]; then
ubuntu_release=$(lsb_release -sr)
fi
fi
if [ -z "${ubuntu_release}" ]; then
echo "System not supported"
exit 1
fi
extra_defines=""
if (( $(bc <<< "${ubuntu_release} < 15.1") )); then
extra_defines="define=_GLIBCXX_USE_CXX11_ABI=0"
fi
num_procs=$(lscpu -p | grep -v '^#' | sort -u -t, -k 2,4 | wc -l) # physical cores
printf "\nBuild command will be: ./b2 -j${num_procs} ${extra_defines}\n\n"
boost_dir="boost_1_${version}_${patch}"
boost_tag="boost-1.${version}.${patch}"
git clone -b "${boost_tag}" --recursive https://github.com/boostorg/boost.git "${boost_dir}"
cd ${boost_dir}
git checkout --force ${boost_tag}
git submodule foreach git checkout --force ${boost_tag}
./bootstrap.sh
./b2 headers
./b2 -j${num_procs} ${extra_defines}
echo "Build command was: ./b2 -j${num_procs} ${extra_defines}"
echo "Don't forget to set BOOST_ROOT!"

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
#
# This script installs the dependencies needed by rippled. It should be run
# with sudo. For Ubuntu < 15.10, it installs gcc 5 as the default compiler. gcc
# 5 is ABI incompatible with gcc 4. If needed, switch back to gcc-4 by running
# `sudo update-alternatives --config gcc` and choosing the gcc-4
# option.
#
if hash lsb_release 2>/dev/null; then
if [ $(lsb_release -si) == "Ubuntu" ]; then
ubuntu_release=$(lsb_release -sr)
fi
fi
if [ -z "${ubuntu_release}" ]; then
echo "System not supported"
exit 1
fi
if [ ${ubuntu_release} == "12.04" ]; then
apt-get install python-software-properties
add-apt-repository ppa:afrank/boost
add-apt-repository ppa:ubuntu-toolchain-r/test
apt-get update
apt-get -y upgrade
apt-get -y install curl git scons ctags pkg-config protobuf-compiler libprotobuf-dev libssl-dev python-software-properties boost1.57-all-dev g++-5 g++-4.9
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 99 --slave /usr/bin/g++ g++ /usr/bin/g++-5
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.9 99 --slave /usr/bin/g++ g++ /usr/bin/g++-4.9
exit 0
fi
if [ ${ubuntu_release} == "14.04" ] || [ ${ubuntu_release} == "15.04" ]; then
apt-get install python-software-properties
echo "deb [arch=amd64] https://mirrors.ripple.com/ubuntu/ trusty stable contrib" | sudo tee /etc/apt/sources.list.d/ripple.list
wget -O- -q https://mirrors.ripple.com/mirrors.ripple.com.gpg.key | sudo apt-key add -
add-apt-repository ppa:ubuntu-toolchain-r/test
apt-get update
apt-get -y upgrade
apt-get -y install curl git scons ctags pkg-config protobuf-compiler libprotobuf-dev libssl-dev python-software-properties boost-all-dev g++-5 g++-4.9
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 99 --slave /usr/bin/g++ g++ /usr/bin/g++-5
update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.9 99 --slave /usr/bin/g++ g++ /usr/bin/g++-4.9
exit 0
fi
if [ ${ubuntu_release} == "16.04" ] || [ ${ubuntu_release} == "15.10" ]; then
apt-get update
apt-get -y upgrade
    apt-get -y install python-software-properties curl git scons ctags pkg-config protobuf-compiler libprotobuf-dev libssl-dev libboost-all-dev
exit 0
fi
echo "System not supported"
exit 1

View File

@@ -0,0 +1,4 @@
RippleD.vcxproj -text
RippleD.vcxproj.filters -text

View File

@@ -0,0 +1,255 @@
# Visual Studio 2015 Build Instructions
## Important
We do not recommend Windows for rippled production use at this time. Currently, the Ubuntu
platform has received the highest level of quality assurance, testing, and support.
Additionally, 32-bit Windows versions are not supported.
## Prerequisites
To clone the source code repository, create branches for inspection or modification,
build rippled under Visual Studio, and run the unit tests, you will need these
software components:
* [Visual Studio 2015](README.md#install-visual-studio-2015)
* [Git for Windows](README.md#install-git-for-windows)
* [Google Protocol Buffers Compiler](README.md#install-google-protocol-buffers-compiler)
* (Optional) [Python and Scons](README.md#optional-install-python-and-scons)
* [OpenSSL Library](README.md#install-openssl)
* [Boost library](README.md#build-boost)
## Install Software
### Install Visual Studio 2015
If not already installed on your system, download your choice of installer from the
[Visual Studio 2015 Download](https://www.visualstudio.com/downloads/download-visual-studio-vs)
page, run the installer, and follow the directions. You may need to choose a "Custom"
installation and ensure that "Visual C++" is selected under "Programming Languages".
Any version of Visual Studio 2015 may be used to build rippled.
The **Visual Studio 2015 Community** edition is available free of charge (see
[the product page](https://www.visualstudio.com/products/visual-studio-community-vs)
for licensing details), while paid editions may be used for a free initial trial period.
### Install Git for Windows
Git is a distributed revision control system. The Windows version also provides the
bash shell and many Windows versions of Unix commands. While there are other
varieties of Git (such as TortoiseGit, which has a native Windows interface and
integrates with the Explorer shell), we recommend installing
[Git for Windows](https://git-scm.com/) since
it provides a Unix-like command line environment useful for running shell scripts.
Use of the bash shell under Windows is mandatory for running the unit tests.
* NOTE: To gain full featured access to the
[git-subtree](https://blogs.atlassian.com/2013/05/alternatives-to-git-submodule-git-subtree/)
functionality used in the rippled repository we suggest Git version 2.6.2 or later.
### Install Google Protocol Buffers Compiler
Building rippled requires **protoc.exe** version 2.5.1 or later. At your option you
may build it yourself from the sources in the
[Google Protocol Buffers](https://github.com/google/protobuf) repository,
or you may download a
[protoc.exe](https://ripple.github.io/Downloads/protoc/2.5.1/protoc.exe)
([alternate link](https://github.com/ripple/Downloads/raw/gh-pages/protoc/2.5.1/protoc.exe))
precompiled Windows executable from the
[Ripple Organization](https://github.com/ripple).
Either way, once you have the required version of **protoc.exe**, copy it into
a folder in your command line `%PATH%`.
* **NOTE:** If you use an older version of the compiler, the build will
fail with errors related to a mismatch of the version of protocol
buffer headers versus the compiler.
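To confirm which compiler your command line will pick up, you can check the
reported version (anything 2.5.1 or later should be fine):
```powershell
protoc --version
```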
### (Optional) Install Python and Scons
[Python](https://www.python.org/downloads/) and
[Scons](http://scons.org/download.php) are not required to build
rippled with Visual Studio, but can be used to build from the
command line and in scripts, and are required to properly update
the `RippleD.vcxproj` file.
If you wish to build with scons, a version after 2.3.5 is required
for Visual Studio 2015 support.
## Configure Dependencies
### Install OpenSSL
[Download OpenSSL.](http://slproweb.com/products/Win32OpenSSL.html)
There will be four variants available; only the two 64-bit variants are relevant here (32-bit Windows is not supported):
1. 64-bit. Use this if you are running 64-bit Windows. As of this writing, the link is called: "Win64 OpenSSL v1.0.2j".
2. 64-bit light - Don't use this. It is missing files needed to build rippled. As of this writing, the link is called: "Win64 OpenSSL v1.0.2j Light".
Run the installer, and choose an appropriate location for your OpenSSL
installation. In this guide we use **C:\lib\OpenSSL-Win64** as the
destination location.
You may be informed on running the installer that "Visual C++ 2008
Redistributables" must be installed first. If so, download it
from the [same page](http://slproweb.com/products/Win32OpenSSL.html),
again making sure to get the correct 32-/64-bit variant.
* NOTE: Since rippled links statically to OpenSSL, it does not matter
where the OpenSSL .DLL files are placed, or what version they are.
rippled does not use or require any external .DLL files to run
other than the standard operating system ones.
### Build Boost
After [downloading boost](http://www.boost.org/users/download/) and
unpacking it, open a **Developer Command Prompt** for
Visual Studio, change to the directory containing boost, then
bootstrap the build tools:
(As of this writing, the most recent version of boost is 1.62.0, which
will unpack into a directory named `boost_1_62_0`. For higher versions
of boost, adjust the directories provided in these examples as
appropriate.)
```powershell
cd C:\lib\boost_1_62_0
bootstrap
```
The rippled application is linked statically to the standard runtimes and external
dependencies on Windows, to ensure that the behavior of the executable is not
affected by changes in outside files. Therefore, it is necessary to build the
required boost static libraries using this command:
```powershell
bjam --toolset=msvc-14.0 address-model=64 architecture=x86 link=static threading=multi runtime-link=shared,static stage --stagedir=stage64
```
Building the boost libraries may take considerable time. When the build process
is completed, take note of both the reported compiler include paths and linker
library paths as they will be required later.
* NOTE: If older versions of Visual Studio are also installed, the build may fail.
If this happens, make sure that only Visual Studio 2015 is installed. Due to
defects in the uninstallation procedures of these Microsoft products, it may
be necessary to start with a fresh install of the operating system with only
the necessary development environment components installed to have a successful build.
### Clone the rippled repository
If you are familiar with cloning github repositories, just follow your normal process
and clone `git@github.com:ripple/rippled.git`. Otherwise follow this section for instructions.
1. If you don't have a github account, sign up for one at
[github.com](https://github.com/).
2. Make sure you have Github ssh keys. For help see
[generating-ssh-keys](https://help.github.com/articles/generating-ssh-keys).
Open the "Git Bash" shell that was installed with "Git for Windows" in the
step above. Navigate to the directory where you want to clone rippled (git
bash uses `/c` for windows's `C:` and forward slash where windows uses
backslash, so `C:\Users\joe\projs` would be `/c/Users/joe/projs` in git bash).
Now clone the repository and optionally switch to the *master* branch.
Type the following at the bash prompt:
```powershell
git clone git@github.com:ripple/rippled.git
cd rippled
git checkout master
```
* If you receive an error about not having the "correct access rights"
make sure you have Github ssh keys, as described above.
### Configure Library Paths
Open the solution file located at **Builds/Visual Studio 2015/ripple.sln**
and select the "View->Property Manager" to bring up the Property Manager.
Expand the *debug | x64* section and
double click the *Microsoft.Cpp.x64.user* property sheet to bring up the
*Property Pages* dialog. These are global properties applied to all
64-bit build targets:
![Visual Studio 2015 Global Properties](images/VS2015x64Properties.png)
Go to *C/C++, General, Additional Include Directories* and add the
location of the boost installation:
![Visual Studio 2015 Include Directories](images/VS2015x64IncludeDirs.png)
Then, go to *Linker, General, Additional Library Directories* and add
the location of the compiled boost libraries reported at the completion
of building the boost libraries:
![Visual Studio 2015 Library Directories](images/VS2015x64LibraryDirs.png)
Follow the same procedure for adding the `Additional Include Directories`
and `Additional Library Directories` required for OpenSSL. In our example
these directories are **C:\lib\OpenSSL-Win64\include** and
**C:\lib\OpenSSL-Win64\lib** respectively.
# Setup Environment
## Create a working directory for rippled.cfg
The rippled server uses the [Rippled.cfg](https://wiki.ripple.com/Rippled.cfg)
file to read its configuration parameters. This section describes setting up
a directory to hold the config file. The next sections describe how to tell
the rippled server where that file is.
1. Create a directory to hold the configuration file. In this example, the
ripple config directory was created in `C:\Users\joe\ripple\config`.
2. Copy the example config file located in `doc\rippled-example.cfg` to the
new directory and rename it "rippled.cfg" (a console sketch of these steps follows this list).
3. Read the rippled.cfg file and edit as appropriate.
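For example, steps 1 and 2 might look like this when run from the root of your
rippled clone (the paths are the illustrative ones above):
```powershell
mkdir C:\Users\joe\ripple\config
copy doc\rippled-example.cfg C:\Users\joe\ripple\config\rippled.cfg
```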
## Change the Visual Studio Projects Debugging Properties
1. If not already open, open the solution file located at **Builds/Visual Studio 2015/Ripple.sln**
2. Select the correct solution platform in the solution platform dropdown (either *x64*
or *Win32* depending on machine type).
3. Select the "Project->Properties" menu item to bring up RippleD's Properties Pages
4. In "Configuration Properties" select "Debugging".
5. In the upper-left Configurations drop down, select "All Configurations".
6. In "Debugger to Launch" select "Local Windows Debugger".
### Tell rippled where to find the configuration file.
Use the `--conf` command-line switch to tell rippled where to find this file.
In the "Command Arguments" field in the properties dialog (that you opened
in the above section), add: `--conf="C:/Users/joe/ripple/config/rippled.cfg"`
(of course replacing that path with the path you set up above).
![Visual Studio 2013 Command Args Prop Page](images/VSCommandArgsPropPage.png)
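The same switch works when launching a built executable directly from a
console; for example (substitute your own build and config paths):
```
./build/msvc.debug/rippled.exe --conf="C:/Users/joe/ripple/config/rippled.cfg"
```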
### Set the _NO_DEBUG_HEAP Environment Variable
Rippled can run very slowly in the debugger when using the Windows Debug Heap.
Set the `_NO_DEBUG_HEAP` environment variable to `1` to disable the debug heap.
In the "Environment" field (that you opened in the above section), add:
`_NO_DEBUG_HEAP=1`
![Visual Studio 2013 No Debug Heap Prop Page](images/NoDebugHeapPropPage.png)
# Build
After these steps are complete, rippled should be ready to build. Set
rippled as the startup project by right-clicking it in the
Visual Studio Solution Explorer and choosing **Set as Startup Project**,
then choose the **Build->Build Solution** menu item.
# Unit Tests (Recommended)
The rippled unit tests are written in C++ and are part
of the rippled executable.
From a Windows console, run the unit tests:
```
./build/msvc.debug/rippled.exe --unittest
```
Substitute the correct path to the executable to test different builds.
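`--unittest` can also take a suite filter to run only a subset of the tests;
the suite name below is illustrative, so substitute one that exists in your build:
```
./build/msvc.debug/rippled.exe --unittest=ripple.tx.Offer
```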

File diff suppressed because it is too large

File diff suppressed because it is too large

Binary file not shown. (image added: 34 KiB)

Binary file not shown. (image added: 67 KiB)

Binary file not shown. (image added: 66 KiB)

Binary file not shown. (image added: 15 KiB)

Binary file not shown. (image added: 19 KiB)

View File

@@ -0,0 +1,36 @@

Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 14
VisualStudioVersion = 14.0.25123.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "RippleD", "RippleD.vcxproj", "{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
debug.classic|x64 = debug.classic|x64
debug.classic|x86 = debug.classic|x86
debug|x64 = debug|x64
debug|x86 = debug|x86
release.classic|x64 = release.classic|x64
release.classic|x86 = release.classic|x86
release|x64 = release|x64
release|x86 = release|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.debug.classic|x64.ActiveCfg = debug.classic|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.debug.classic|x64.Build.0 = debug.classic|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.debug.classic|x86.ActiveCfg = debug.classic|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.debug|x64.ActiveCfg = debug|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.debug|x64.Build.0 = debug|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.debug|x86.ActiveCfg = debug|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.release.classic|x64.ActiveCfg = release.classic|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.release.classic|x64.Build.0 = release.classic|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.release.classic|x86.ActiveCfg = release.classic|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.release|x64.ActiveCfg = release|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.release|x64.Build.0 = release|x64
{26B7D9AC-1A80-8EF8-6703-D061F1BECB75}.release|x86.ActiveCfg = release|x64
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
EndGlobal

177
Builds/XCode/README.md Normal file
View File

@@ -0,0 +1,177 @@
# macOS Build Instructions
## Important
We don't recommend OS X for rippled production use at this time. Currently, the
Ubuntu platform has received the highest level of quality assurance and
testing.
## Prerequisites
You'll need OS X 10.8 or later.
To clone the source code repository, create branches for inspection or
modification, build rippled using clang, and run the system tests, you will
need these software components:
* [XCode](https://developer.apple.com/xcode/)
* [Homebrew](http://brew.sh/)
* [Git](http://git-scm.com/)
* [Scons](http://www.scons.org/)
## Install Software
### Install XCode
If not already installed on your system, download and install XCode using the
App Store or [this link](https://developer.apple.com/xcode/).
For more info, see "Step 1: Download and Install the Command Line Tools"
[here](http://www.moncefbelyamani.com/how-to-install-xcode-homebrew-git-rvm-ruby-on-mac)
The command line tools can be installed through the terminal with the command:
```
xcode-select --install
```
### Install Homebrew
> "[Homebrew](http://brew.sh/) installs the stuff you need that Apple didnt."
Open a terminal and type:
```
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
```
For more info, see "Step 3: Install Homebrew"
[here](http://www.moncefbelyamani.com/how-to-install-xcode-homebrew-git-rvm-ruby-on-mac)
### Install Git
```
brew update
brew install git
```
For more info, see "Step 4: Install Git"
[here](http://www.moncefbelyamani.com/how-to-install-xcode-homebrew-git-rvm-ruby-on-mac)
**NOTE**: To gain full featured access to the
[git-subtree](http://blogs.atlassian.com/2013/05/alternatives-to-git-submodule-git-subtree/)
functionality used in the rippled repository, we suggest Git version 1.8.3.2 or
later.
### Install Scons
Requires version 2.3.0 or later
```
brew install scons
```
`brew` will generally install the latest stable version of any package, which
will satisfy the scons minimum version requirement for rippled.
### Install Package Config
```
brew install pkg-config
```
## Install/Build/Configure Dependencies
### Build Google Protocol Buffers Compiler
Building rippled on OS X requires `protoc` version 2.5.x or 2.6.x (later versions
do not work with rippled at this time).
Download [this](https://github.com/google/protobuf/releases/download/v2.6.1/protobuf-2.6.1.tar.bz2)
We want to compile protocol buffers with clang/libc++:
```
tar xfvj protobuf-2.6.1.tar.bz2
cd protobuf-2.6.1
./configure CC=clang CXX=clang++ CXXFLAGS='-std=c++11 -stdlib=libc++ -O3 -g' LDFLAGS='-stdlib=libc++' LIBS="-lc++ -lc++abi"
make -j 4
sudo make install
```
If you have installed `protobuf` via brew - either directly or indirectly as a
dependency of some other package - this is likely to conflict with our specific
version requirements. The simplest way to avoid conflicts is to uninstall it.
`brew ls --versions protobuf` will list any versions of protobuf
you currently have installed.
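For example, to remove a brew-installed protobuf (assuming nothing else on
your system depends on it):
```
brew uninstall protobuf
```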
### Install OpenSSL
```
brew install openssl
```
### Build Boost
We want to compile boost with clang/libc++
Download [a release](https://sourceforge.net/projects/boost/files/boost/1.61.0/boost_1_61_0.tar.bz2)
Extract it to a folder, making note of where, open a terminal, then:
```
./bootstrap.sh
./b2 toolset=clang threading=multi runtime-link=static link=static cxxflags="-stdlib=libc++" linkflags="-stdlib=libc++" address-model=64
```
Create an environment variable `BOOST_ROOT` in one of your `rc` files, pointing
to the root of the extracted directory.
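A minimal sketch, assuming a bash login shell and the download location used
in the example below:
```
echo 'export BOOST_ROOT="$HOME/Downloads/boost_1_61_0"' >> ~/.bash_profile
```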
### Clone the rippled repository
From the terminal
```
git clone git@github.com:ripple/rippled.git
cd rippled
```
Choose the master branch or one of the tagged releases listed on
[GitHub](https://github.com/ripple/rippled/releases).
```
git checkout master
```
or to test the latest release candidate, choose the `release` branch.
```
git checkout release
```
### Configure Library Paths
If you didn't persistently set the `BOOST_ROOT` environment variable to the
root of the extracted directory above, then you should set it temporarily.
For example, assuming your username were `Abigail` and you extracted Boost
1.61.0 in `/Users/Abigail/Downloads/boost_1_61_0`, you would do for any
shell in which you want to build:
```
export BOOST_ROOT=/Users/Abigail/Downloads/boost_1_61_0
```
## Build
```
scons
```
See: [here](https://ripple.com/wiki/Rippled_build_instructions#Building)
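scons also accepts a parallel job count, which speeds up the build
considerably on multi-core machines, e.g.:
```
scons -j 4
```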
## Unit Tests (Recommended)
rippled builds a set of unit tests into the server executable. To run these unit
tests after building, pass the `--unittest` option to the compiled `rippled`
executable. The executable will exit after running the unit tests.

6
Builds/build_all.sh Normal file
View File

@@ -0,0 +1,6 @@
#!/usr/bin/env bash
num_procs=$(lscpu -p | grep -v '^#' | sort -u -t, -k 2,4 | wc -l) # number of physical cores
cd ..
./Builds/Test.py -a -c -- -j${num_procs}

View File

@@ -1,114 +0,0 @@
# Levelization
Levelization is the term used to describe efforts to prevent rippled from
having or creating cyclic dependencies.
rippled code is organized into directories under `src/rippled` (and
`src/test`) representing modules. The modules are intended to be
organized into "tiers" or "levels" such that a module from one level can
only include code from lower levels. Additionally, a module
in one level should never include code in an `impl` folder of any level
other than its own.
Unfortunately, over time, enforcement of levelization has been
inconsistent, so the current state of the code doesn't necessarily
reflect these rules. Whenever possible, developers should refactor any
levelization violations they find (by moving files or individual
classes). At the very least, don't make things worse.
The table below summarizes the _desired_ division of modules, based on the
state of the rippled code when it was created. The levels are numbered from
the bottom up with the lower level, lower numbered, more independent
modules listed first, and the higher level, higher numbered modules with
more dependencies listed later.
**tl;dr:** The modules listed first are more independent than the modules
listed later.
| Level / Tier | Module(s) |
|--------------|-----------------------------------------------|
| 01 | ripple/beast ripple/unity
| 02 | ripple/basics
| 03 | ripple/json ripple/crypto
| 04 | ripple/protocol
| 05 | ripple/core ripple/conditions ripple/consensus ripple/resource ripple/server
| 06 | ripple/peerfinder ripple/ledger ripple/nodestore ripple/net
| 07 | ripple/shamap ripple/overlay
| 08 | ripple/app
| 09 | ripple/rpc
| 10 | ripple/perflog
| 11 | test/jtx test/beast test/csf
| 12 | test/unit_test
| 13 | test/crypto test/conditions test/json test/resource test/shamap test/peerfinder test/basics test/overlay
| 14 | test
| 15 | test/net test/protocol test/ledger test/consensus test/core test/server test/nodestore
| 16 | test/rpc test/app
(Note that `test` levelization is *much* less important and *much* less
strictly enforced than `ripple` levelization, other than the requirement
that `test` code should *never* be included in `ripple` code.)
## Validation
The [levelization.sh](levelization.sh) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked out repo, and will
do an analysis of all the `#include`s in
the rippled source. The only caveat is that it runs much slower
under Windows than in Linux. It hasn't yet been tested under MacOS.
It generates many files of [results](results):
* `rawincludes.txt`: The raw dump of the `#includes`
* `paths.txt`: A second dump grouping the source module
to the destination module, deduped, and with frequency counts.
* `includes/`: A directory where each file represents a module and
contains a list of modules and counts that the module _includes_.
* `includedby/`: Similar to `includes/`, but the other way around. Each
file represents a module and contains a list of modules and counts
that _include_ the module.
* [`loops.txt`](results/loops.txt): A list of direct loops detected
between modules as they actually exist, as opposed to how they are
desired as described above. In a perfect repo, this file will be
empty.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../.github/workflows/levelization.yml) to validate
that nothing changed.
* [`ordering.txt`](results/ordering.txt): A list showing relationships
between modules where there are no loops as they actually exist, as
opposed to how they are desired as described above.
This file is committed to the repo, and is used by the [levelization
Github workflow](../../.github/workflows/levelization.yml) to validate
that nothing changed.
* [`levelization.yml`](../../.github/workflows/levelization.yml)
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or
done anything else to improve levelization, run `levelization.sh`,
and commit the updated results.
The `loops.txt` and `ordering.txt` files relate the modules
using comparison signs, which indicate the number of times each
module is included in the other.
* `A > B` means that A should probably be at a higher level than B,
because B is included in A significantly more than A is included in B.
These results can be included in both `loops.txt` and `ordering.txt`.
Because `ordering.txt` only includes relationships where B is not
included in A at all, it will only include these types of results.
* `A ~= B` means that A and B are included in each other a different
number of times, but the values are so close that the script can't
definitively say that one should be above the other. These results
will only be included in `loops.txt`.
* `A == B` means that A and B include each other the same number of
times, so the script has no clue which should be higher. These results
will only be included in `loops.txt`.
The committed files hide the detailed values intentionally, to
prevent false alarms and merging issues, and because it's easy to
get those details locally.
1. Run `levelization.sh`
2. Grep the modules in `paths.txt`.
* For example, if a cycle is found `A ~= B`, simply `grep -w
A Builds/levelization/results/paths.txt | grep -w B`

View File

@@ -1,130 +0,0 @@
#!/bin/bash
# Usage: levelization.sh
# This script takes no parameters, reads no environment variables,
# and can be run from any directory, as long as it is in the expected
# location in the repo.
pushd $( dirname $0 )
if [ -v PS1 ]
then
# if the shell is interactive, clean up any flotsam before analyzing
git clean -ix
fi
# Ensure all sorting is ASCII-order consistently across platforms.
export LANG=C
rm -rfv results
mkdir results
includes="$( pwd )/results/rawincludes.txt"
pushd ../..
echo Raw includes:
grep -r '^[ ]*#include.*/.*\.h' include src | \
grep -v boost | tee ${includes}
popd
pushd results
oldifs=${IFS}
IFS=:
mkdir includes
mkdir includedby
echo Build levelization paths
exec 3< ${includes} # open rawincludes.txt for input
while read -r -u 3 file include
do
level=$( echo ${file} | cut -d/ -f 2,3 )
# If the "level" indicates a file, cut off the filename
if [[ "${level##*.}" != "${level}" ]]
then
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
level="$( dirname ${level} )/toplevel"
fi
level=$( echo ${level} | tr '/' '.' )
includelevel=$( echo ${include} | sed 's/.*["<]//; s/[">].*//' | \
cut -d/ -f 1,2 )
if [[ "${includelevel##*.}" != "${includelevel}" ]]
then
# Use the "toplevel" label as a workaround for `sort`
# inconsistencies between different utility versions
includelevel="$( dirname ${includelevel} )/toplevel"
fi
includelevel=$( echo ${includelevel} | tr '/' '.' )
if [[ "$level" != "$includelevel" ]]
then
echo $level $includelevel | tee -a paths.txt
fi
done
echo Sort and dedup paths
sort -ds paths.txt | uniq -c | tee sortedpaths.txt
mv sortedpaths.txt paths.txt
exec 3>&- #close fd 3
IFS=${oldifs}
unset oldifs
echo Split into flat-file database
exec 4<paths.txt # open paths.txt for input
while read -r -u 4 count level include
do
echo ${include} ${count} | tee -a includes/${level}
echo ${level} ${count} | tee -a includedby/${include}
done
exec 4>&- #close fd 4
loops="$( pwd )/loops.txt"
ordering="$( pwd )/ordering.txt"
pushd includes
echo Search for loops
# Redirect stdout to a file
exec 4>&1
exec 1>"${loops}"
for source in *
do
if [[ -f "$source" ]]
then
exec 5<"${source}" # open for input
while read -r -u 5 include includefreq
do
if [[ -f $include ]]
then
if grep -q -w $source $include
then
if grep -q -w "Loop: $include $source" "${loops}"
then
continue
fi
sourcefreq=$( grep -w $source $include | cut -d' ' -f2 )
echo "Loop: $source $include"
# If the counts are close, indicate that the two modules are
# on the same level, though they shouldn't be
if [[ $(( $includefreq - $sourcefreq )) -gt 3 ]]
then
echo -e " $source > $include\n"
elif [[ $(( $sourcefreq - $includefreq )) -gt 3 ]]
then
echo -e " $include > $source\n"
elif [[ $sourcefreq -eq $includefreq ]]
then
echo -e " $include == $source\n"
else
echo -e " $include ~= $source\n"
fi
else
echo "$source > $include" >> "${ordering}"
fi
fi
done
exec 5>&- #close fd 5
fi
done
exec 1>&4 #close fd 1
exec 4>&- #close fd 4
cat "${ordering}"
cat "${loops}"
popd
popd
popd

View File

@@ -1,42 +0,0 @@
Loop: test.jtx test.toplevel
test.toplevel > test.jtx
Loop: test.jtx test.unit_test
test.unit_test == test.jtx
Loop: xrpld.app xrpld.core
xrpld.app > xrpld.core
Loop: xrpld.app xrpld.ledger
xrpld.app > xrpld.ledger
Loop: xrpld.app xrpld.net
xrpld.app > xrpld.net
Loop: xrpld.app xrpld.overlay
xrpld.overlay > xrpld.app
Loop: xrpld.app xrpld.peerfinder
xrpld.peerfinder ~= xrpld.app
Loop: xrpld.app xrpld.rpc
xrpld.rpc > xrpld.app
Loop: xrpld.app xrpld.shamap
xrpld.app > xrpld.shamap
Loop: xrpld.core xrpld.net
xrpld.net > xrpld.core
Loop: xrpld.core xrpld.perflog
xrpld.perflog == xrpld.core
Loop: xrpld.net xrpld.rpc
xrpld.rpc ~= xrpld.net
Loop: xrpld.overlay xrpld.rpc
xrpld.rpc ~= xrpld.overlay
Loop: xrpld.perflog xrpld.rpc
xrpld.rpc ~= xrpld.perflog

View File

@@ -1,196 +0,0 @@
libxrpl.basics > xrpl.basics
libxrpl.crypto > xrpl.basics
libxrpl.json > xrpl.basics
libxrpl.json > xrpl.json
libxrpl.protocol > xrpl.basics
libxrpl.protocol > xrpl.json
libxrpl.protocol > xrpl.protocol
libxrpl.resource > xrpl.basics
libxrpl.resource > xrpl.json
libxrpl.resource > xrpl.resource
libxrpl.server > xrpl.basics
libxrpl.server > xrpl.json
libxrpl.server > xrpl.protocol
libxrpl.server > xrpl.server
test.app > test.jtx
test.app > test.rpc
test.app > test.toplevel
test.app > test.unit_test
test.app > xrpl.basics
test.app > xrpld.app
test.app > xrpld.core
test.app > xrpld.ledger
test.app > xrpld.nodestore
test.app > xrpld.overlay
test.app > xrpld.rpc
test.app > xrpl.json
test.app > xrpl.protocol
test.app > xrpl.resource
test.basics > test.jtx
test.basics > test.unit_test
test.basics > xrpl.basics
test.basics > xrpld.perflog
test.basics > xrpld.rpc
test.basics > xrpl.json
test.basics > xrpl.protocol
test.beast > xrpl.basics
test.conditions > xrpl.basics
test.conditions > xrpld.conditions
test.consensus > test.csf
test.consensus > test.toplevel
test.consensus > test.unit_test
test.consensus > xrpl.basics
test.consensus > xrpld.app
test.consensus > xrpld.consensus
test.consensus > xrpld.ledger
test.consensus > xrpl.json
test.core > test.jtx
test.core > test.toplevel
test.core > test.unit_test
test.core > xrpl.basics
test.core > xrpld.core
test.core > xrpld.perflog
test.core > xrpl.json
test.core > xrpl.server
test.csf > xrpl.basics
test.csf > xrpld.consensus
test.csf > xrpl.json
test.csf > xrpl.protocol
test.json > test.jtx
test.json > xrpl.json
test.jtx > xrpl.basics
test.jtx > xrpld.app
test.jtx > xrpld.core
test.jtx > xrpld.ledger
test.jtx > xrpld.net
test.jtx > xrpld.rpc
test.jtx > xrpl.json
test.jtx > xrpl.protocol
test.jtx > xrpl.resource
test.jtx > xrpl.server
test.ledger > test.jtx
test.ledger > test.toplevel
test.ledger > xrpl.basics
test.ledger > xrpld.app
test.ledger > xrpld.core
test.ledger > xrpld.ledger
test.ledger > xrpl.protocol
test.nodestore > test.jtx
test.nodestore > test.toplevel
test.nodestore > test.unit_test
test.nodestore > xrpl.basics
test.nodestore > xrpld.core
test.nodestore > xrpld.nodestore
test.nodestore > xrpld.unity
test.overlay > test.jtx
test.overlay > test.toplevel
test.overlay > test.unit_test
test.overlay > xrpl.basics
test.overlay > xrpld.app
test.overlay > xrpld.overlay
test.overlay > xrpld.peerfinder
test.overlay > xrpld.shamap
test.overlay > xrpl.protocol
test.peerfinder > test.beast
test.peerfinder > test.unit_test
test.peerfinder > xrpl.basics
test.peerfinder > xrpld.core
test.peerfinder > xrpld.peerfinder
test.peerfinder > xrpl.protocol
test.protocol > test.toplevel
test.protocol > xrpl.basics
test.protocol > xrpl.json
test.protocol > xrpl.protocol
test.resource > test.unit_test
test.resource > xrpl.basics
test.resource > xrpl.resource
test.rpc > test.jtx
test.rpc > test.toplevel
test.rpc > xrpl.basics
test.rpc > xrpld.app
test.rpc > xrpld.core
test.rpc > xrpld.net
test.rpc > xrpld.overlay
test.rpc > xrpld.rpc
test.rpc > xrpl.json
test.rpc > xrpl.protocol
test.rpc > xrpl.resource
test.server > test.jtx
test.server > test.toplevel
test.server > test.unit_test
test.server > xrpl.basics
test.server > xrpld.app
test.server > xrpld.core
test.server > xrpld.rpc
test.server > xrpl.json
test.server > xrpl.server
test.shamap > test.unit_test
test.shamap > xrpl.basics
test.shamap > xrpld.nodestore
test.shamap > xrpld.shamap
test.shamap > xrpl.protocol
test.toplevel > test.csf
test.toplevel > xrpl.json
test.unit_test > xrpl.basics
xrpl.json > xrpl.basics
xrpl.protocol > xrpl.basics
xrpl.protocol > xrpl.json
xrpl.resource > xrpl.basics
xrpl.resource > xrpl.json
xrpl.resource > xrpl.protocol
xrpl.server > xrpl.basics
xrpl.server > xrpl.json
xrpl.server > xrpl.protocol
xrpld.app > test.unit_test
xrpld.app > xrpl.basics
xrpld.app > xrpld.conditions
xrpld.app > xrpld.consensus
xrpld.app > xrpld.nodestore
xrpld.app > xrpld.perflog
xrpld.app > xrpl.json
xrpld.app > xrpl.protocol
xrpld.app > xrpl.resource
xrpld.conditions > xrpl.basics
xrpld.conditions > xrpl.protocol
xrpld.consensus > xrpl.basics
xrpld.consensus > xrpl.json
xrpld.consensus > xrpl.protocol
xrpld.core > xrpl.basics
xrpld.core > xrpl.json
xrpld.core > xrpl.protocol
xrpld.ledger > xrpl.basics
xrpld.ledger > xrpl.json
xrpld.ledger > xrpl.protocol
xrpld.net > xrpl.basics
xrpld.net > xrpl.json
xrpld.net > xrpl.protocol
xrpld.net > xrpl.resource
xrpld.nodestore > xrpl.basics
xrpld.nodestore > xrpld.core
xrpld.nodestore > xrpld.unity
xrpld.nodestore > xrpl.json
xrpld.nodestore > xrpl.protocol
xrpld.overlay > xrpl.basics
xrpld.overlay > xrpld.core
xrpld.overlay > xrpld.peerfinder
xrpld.overlay > xrpld.perflog
xrpld.overlay > xrpl.json
xrpld.overlay > xrpl.protocol
xrpld.overlay > xrpl.resource
xrpld.overlay > xrpl.server
xrpld.peerfinder > xrpl.basics
xrpld.peerfinder > xrpld.core
xrpld.peerfinder > xrpl.protocol
xrpld.perflog > xrpl.basics
xrpld.perflog > xrpl.json
xrpld.rpc > xrpl.basics
xrpld.rpc > xrpld.core
xrpld.rpc > xrpld.ledger
xrpld.rpc > xrpld.nodestore
xrpld.rpc > xrpl.json
xrpld.rpc > xrpl.protocol
xrpld.rpc > xrpl.resource
xrpld.rpc > xrpl.server
xrpld.shamap > xrpl.basics
xrpld.shamap > xrpld.nodestore
xrpld.shamap > xrpl.protocol

View File

@@ -0,0 +1,13 @@
--- /usr/include/boost/config/compiler/clang.hpp 2013-07-20 13:17:10.000000000 -0400
+++ /usr/include/boost/config/compiler/clang.rippled.hpp 2014-03-11 16:40:51.000000000 -0400
@@ -39,6 +39,10 @@
// Clang supports "long long" in all compilation modes.
#define BOOST_HAS_LONG_LONG
+#if defined(__SIZEOF_INT128__)
+# define BOOST_HAS_INT128
+#endif
+
//
// Dynamic shared object (DSO) and dynamic-link library (DLL) support
//

View File

@@ -0,0 +1,10 @@
--- /usr/include/boost/bimap/detail/debug/static_error.hpp 2008-03-22 17:45:55.000000000 -0400
+++ /usr/include/boost/bimap/detail/debug/static_error.rippled.hpp 2014-03-12 19:40:05.000000000 -0400
@@ -25,7 +25,6 @@
// a static error.
/*===========================================================================*/
#define BOOST_BIMAP_STATIC_ERROR(MESSAGE,VARIABLES) \
- struct BOOST_PP_CAT(BIMAP_STATIC_ERROR__,MESSAGE) {}; \
BOOST_MPL_ASSERT_MSG(false, \
BOOST_PP_CAT(BIMAP_STATIC_ERROR__,MESSAGE), \
VARIABLES)

View File

@@ -1,146 +1,517 @@
cmake_minimum_required(VERSION 3.16)
# !!! The official build system is SConstruct !!!
# This is an experimental cmake build file for rippled
#
# cmake support in rippled. Currently supports:
#
# * unity/nounity debug/release
# * running protobuf
# * sanitizer builds
# * optional release build with assert turned on
# * `target` variable to easily set compiler/debug/unity
# (i.e. -Dtarget=gcc.debug.nounity)
# * gcc/clang/visual studio/xcode
# * linux/mac/win
# * gcc 4 ABI, when needed
# * ninja builds
# * check openssl version on linux
# * static builds (swd TBD: needs to be tested by building & deploying on different systems)
#
# TBD:
# * jemalloc support
# * count
# * Windows protobuf compiler puts generated file in src directory instead of build directory.
#
# Notes:
# * Use the -G"Visual Studio 14 2015 Win64" generator on Windows. Without this
# a 32-bit project will be created. There is no way to set the generator or
# force a 64-bit build in CMakeLists.txt (setting CMAKE_GENERATOR_PLATFORM won't work).
# The best solution may be to wrap cmake with a script.
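# For example, generating from a sibling build directory might look like:
#   cmake -G"Visual Studio 14 2015 Win64" ..\rippled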
#
# * It is not possible to generate a visual studio project on linux or
# mac. The visual studio generator is only available on windows.
#
# * The visual studio project can be _either_ unity or
# non-unity (selected at generation time). It does not appear possible
# to disable compilation based on configuration.
#
# * The language is _much_ worse than Python: poor documentation and "quirky"
# language support (for example, generator expressions can only be used
# in limited contexts and seem to work differently based on
# context (set_property can set multiple values; add_compile_options
# cannot, or is buggy))
#
# * Could not call out to `sed` because cmake messed with the regular
# expression before calling the external command. I did not see a way
# around this.
#
# * Makefile generators want to be single-target: they want a separate
# directory for each target type. I saw some mentions on the web of
# ways around this but haven't looked into it. The Visual Studio project
# does support debug/release configurations in the same project (but
# not unity/non-unity).
if(POLICY CMP0074)
cmake_policy(SET CMP0074 NEW)
endif()
if(POLICY CMP0077)
cmake_policy(SET CMP0077 NEW)
endif()
############################################################
# Fix "unrecognized escape" issues when passing CMAKE_MODULE_PATH on Windows.
file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
cmake_minimum_required(VERSION 3.1.0)
project(xrpl)
set(CMAKE_CXX_EXTENSIONS OFF)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
if(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
# GCC-specific fixes
add_compile_options(-Wno-unknown-pragmas -Wno-subobject-linkage)
# -Wno-subobject-linkage can be removed when we upgrade GCC version to at least 13.3
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
# Clang-specific fixes
add_compile_options(-Wno-unknown-warning-option) # Ignore unknown warning options
elseif(MSVC)
# MSVC-specific fixes
add_compile_options(/wd4068) # Ignore unknown pragmas
endif()
# make GIT_COMMIT_HASH define available to all sources
find_package(Git)
if(Git_FOUND)
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse HEAD
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gch)
if(gch)
set(GIT_COMMIT_HASH "${gch}")
message(STATUS gch: ${GIT_COMMIT_HASH})
add_definitions(-DGIT_COMMIT_HASH="${GIT_COMMIT_HASH}")
endif()
execute_process(COMMAND ${GIT_EXECUTABLE} --git-dir=${CMAKE_CURRENT_SOURCE_DIR}/.git rev-parse --abbrev-ref HEAD
OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE gb)
if(gb)
set(GIT_BRANCH "${gb}")
message(STATUS gb: ${GIT_BRANCH})
add_definitions(-DGIT_BRANCH="${GIT_BRANCH}")
endif()
endif() #git
if(thread_safety_analysis)
add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS)
add_compile_options("-stdlib=libc++")
add_link_options("-stdlib=libc++")
endif()
include (CheckCXXCompilerFlag)
include (FetchContent)
include (ExternalProject)
include (CMakeFuncs) # must come *after* ExternalProject b/c it overrides one function in EP
if (target)
message (FATAL_ERROR "The target option has been removed - use native cmake options to control build")
endif ()
include(RippledSanity)
include(RippledVersion)
include(RippledSettings)
# this check has to remain in the top-level cmake
# because of the early return statement
if (packages_only)
if (NOT TARGET rpm)
message (FATAL_ERROR "packages_only requested, but targets were not created - is docker installed?")
if("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
set(dir "build")
set(cmd "cmake")
if (target)
set(dir "${dir}/${target}")
set(cmd "${cmd} -Dtarget=${target}")
elseif(CMAKE_BUILD_TYPE)
set(dir "${dir}/${CMAKE_BUILD_TYPE}")
set(cmd "${cmd} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}")
else()
set(dir "${dir}/default")
endif()
return ()
endif ()
include(RippledCompiler)
include(RippledInterface)
set(cmd "${cmd} ${CMAKE_SOURCE_DIR}")
option(only_docs "Include only the docs target?" FALSE)
include(RippledDocs)
if(only_docs)
return()
message(FATAL_ERROR "Builds are not allowed in ${CMAKE_SOURCE_DIR}.\n"
"Instead:\n"
"1) Remove the CMakeCache.txt file and CMakeFiles directory "
"from ${CMAKE_SOURCE_DIR}.\n"
"2) Create a directory to hold your build files, for example: ${dir}.\n"
"3) Change to that directory.\n"
"4) Run cmake targetting ${CMAKE_SOURCE_DIR}, for example: ${cmd}")
endif()
if("${CMAKE_GENERATOR}" MATCHES "Visual Studio" AND
NOT ("${CMAKE_GENERATOR}" MATCHES .*Win64.*))
message(FATAL_ERROR "Visual Studio 32-bit build is unsupported. Use
-G\"${CMAKE_GENERATOR} Win64\"")
endif()
###
set(CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/Builds/CMake")
include(CMakeFuncs)
include(deps/Boost)
find_package(OpenSSL 1.1.1 REQUIRED)
set_target_properties(OpenSSL::SSL PROPERTIES
INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2
)
set(SECP256K1_INSTALL TRUE)
add_subdirectory(external/secp256k1)
add_library(secp256k1::secp256k1 ALIAS secp256k1)
add_subdirectory(external/ed25519-donna)
add_subdirectory(external/antithesis-sdk)
find_package(gRPC REQUIRED)
find_package(lz4 REQUIRED)
# Target names with :: are not allowed in a generator expression.
# We need to pull the include directories and imported location properties
# from separate targets.
find_package(LibArchive REQUIRED)
find_package(SOCI REQUIRED)
find_package(SQLite3 REQUIRED)
set(openssl_min 1.0.2)
option(rocksdb "Enable RocksDB" ON)
if(rocksdb)
find_package(RocksDB REQUIRED)
set_target_properties(RocksDB::rocksdb PROPERTIES
INTERFACE_COMPILE_DEFINITIONS RIPPLE_ROCKSDB_AVAILABLE=1
)
target_link_libraries(ripple_libs INTERFACE RocksDB::rocksdb)
parse_target()
if (NOT DEFINED unity)
set(unity true)
set(nonunity false)
endif()
find_package(nudb REQUIRED)
find_package(date REQUIRED)
find_package(xxHash REQUIRED)
setup_build_cache()
target_link_libraries(ripple_libs INTERFACE
ed25519::ed25519
lz4::lz4
OpenSSL::Crypto
OpenSSL::SSL
secp256k1::secp256k1
soci::soci
SQLite::SQLite3
)
project(rippled)
# Work around changes to Conan recipe for now.
if(TARGET nudb::core)
set(nudb nudb::core)
elseif(TARGET NuDB::nudb)
set(nudb NuDB::nudb)
if(nonunity)
get_cmake_property(allvars VARIABLES)
string(REGEX MATCHALL "[^;]*(DEBUG|RELEASE)[^;]*" matchvars "${allvars}")
foreach(var IN LISTS matchvars)
string(REGEX REPLACE "(DEBUG|RELEASE)" "\\1CLASSIC" newvar ${var})
set(${newvar} ${${var}})
endforeach()
get_cmake_property(allvars CACHE_VARIABLES)
string(REGEX MATCHALL "[^;]*(DEBUG|RELEASE)[^;]*" matchvars "${allvars}")
foreach(var IN LISTS matchvars)
string(REGEX REPLACE "(DEBUG|RELEASE)" "\\1CLASSIC" newvar ${var})
set(${newvar} ${${var}} CACHE STRING "Copied from ${var}")
endforeach()
endif()
determine_build_type()
check_gcc4_abi()
############################################################
include_directories(
src
src/beast
src/beast/include
src/beast/extras
src/nudb/include
src/soci/src
src/soci/include)
special_build_flags()
############################################################
use_boost(
# resist the temptation to alphabetize these. coroutine
# must come before context.
chrono
coroutine
context
date_time
filesystem
program_options
regex
system
thread)
use_pthread()
use_openssl(${openssl_min})
use_protobuf()
setup_build_boilerplate()
############################################################
if (is_clang)
set(rocks_db_system_header --system-header-prefix=rocksdb2)
else()
message(FATAL_ERROR "unknown nudb target")
endif()
target_link_libraries(ripple_libs INTERFACE ${nudb})
if(coverage)
include(RippledCov)
unset(rocks_db_system_header)
endif()
set(PROJECT_EXPORT_SET RippleExports)
include(RippledCore)
include(RippledInstall)
include(RippledValidatorKeys)
set(soci_extra_includes
-I"${CMAKE_SOURCE_DIR}/"src/soci/src/core
-I"${CMAKE_SOURCE_DIR}/"src/soci/include/private
-I"${CMAKE_SOURCE_DIR}/"src/sqlite)
############################################################
# Unity sources
prepend(beast_unity_srcs
src/ripple/beast/unity/
beast_insight_unity.cpp
beast_net_unity.cpp
beast_utility_unity.cpp)
prepend(ripple_unity_srcs
src/ripple/unity/
app_ledger.cpp
app_main.cpp
app_misc.cpp
app_paths.cpp
app_tx.cpp
conditions.cpp
core.cpp
basics.cpp
crypto.cpp
ledger.cpp
net.cpp
overlay.cpp
peerfinder.cpp
json.cpp
protocol.cpp
rpcx.cpp
shamap.cpp
server.cpp)
prepend(test_unity_srcs
src/test/unity/
app_test_unity.cpp
basics_test_unity.cpp
beast_test_unity.cpp
conditions_test_unity.cpp
core_test_unity.cpp
json_test_unity.cpp
ledger_test_unity.cpp
overlay_test_unity.cpp
peerfinder_test_unity.cpp
protocol_test_unity.cpp
resource_test_unity.cpp
rpc_test_unity.cpp
server_test_unity.cpp
shamap_test_unity.cpp
support_unity.cpp)
list(APPEND rippled_src_unity ${beast_unity_srcs} ${ripple_unity_srcs} ${test_unity_srcs})
add_with_props(rippled_src_unity src/test/unity/nodestore_test_unity.cpp
-I"${CMAKE_SOURCE_DIR}/"src/rocksdb2/include
-I"${CMAKE_SOURCE_DIR}/"src/snappy/snappy
-I"${CMAKE_SOURCE_DIR}/"src/snappy/config
${rocks_db_system_header})
add_with_props(rippled_src_unity src/ripple/unity/nodestore.cpp
-I"${CMAKE_SOURCE_DIR}/"src/rocksdb2/include
-I"${CMAKE_SOURCE_DIR}/"src/snappy/snappy
-I"${CMAKE_SOURCE_DIR}/"src/snappy/config
${rocks_db_system_header})
add_with_props(rippled_src_unity src/ripple/unity/soci_ripple.cpp ${soci_extra_includes})
list(APPEND ripple_unity_srcs ${beast_unity_srcs} ${test_unity_srcs}
src/ripple/unity/nodestore.cpp
src/ripple/unity/soci_ripple.cpp
src/test/unity/nodestore_test_unity.cpp)
############################################################
# Non-unity sources
file(GLOB_RECURSE core_srcs src/ripple/core/*.cpp)
add_with_props(rippled_src_nonunity "${core_srcs}"
-I"${CMAKE_SOURCE_DIR}/"src/soci/src/core
-I"${CMAKE_SOURCE_DIR}/"src/sqlite)
set(non_unity_srcs ${core_srcs})
foreach(curdir
beast/clock
beast/container
beast/insight
beast/net
beast/utility
app
basics
conditions
crypto
json
ledger
legacy
net
overlay
peerfinder
protocol
rpc
server
shamap)
file(GLOB_RECURSE cursrcs src/ripple/${curdir}/*.cpp)
list(APPEND rippled_src_nonunity "${cursrcs}")
list(APPEND non_unity_srcs "${cursrcs}")
endforeach()
file(GLOB_RECURSE nodestore_srcs src/ripple/nodestore/*.cpp
src/test/nodestore/*.cpp)
add_with_props(rippled_src_nonunity "${nodestore_srcs}"
-I"${CMAKE_SOURCE_DIR}/"src/rocksdb2/include
-I"${CMAKE_SOURCE_DIR}/"src/snappy/snappy
-I"${CMAKE_SOURCE_DIR}/"src/snappy/config
${rocks_db_system_header})
list(APPEND non_unity_srcs "${nodestore_srcs}")
# unit test sources
foreach(curdir
app
basics
beast
conditions
core
json
ledger
nodestore
overlay
peerfinder
protocol
resource
rpc
server
shamap
jtx)
file(GLOB_RECURSE cursrcs src/test/${curdir}/*.cpp)
list(APPEND test_srcs "${cursrcs}")
endforeach()
add_with_props(rippled_src_nonunity "${test_srcs}"
-I"${CMAKE_SOURCE_DIR}/"src/rocksdb2/include
-I"${CMAKE_SOURCE_DIR}/"src/snappy/snappy
-I"${CMAKE_SOURCE_DIR}/"src/snappy/config
${rocks_db_system_header})
list(APPEND non_unity_srcs "${test_srcs}")
if(WIN32 OR is_xcode)
# Rippled headers. Only needed for IDEs.
file(GLOB_RECURSE rippled_headers src/*.h src/*.hpp)
list(APPEND rippled_headers Builds/CMake/CMakeFuncs.cmake)
foreach(curdir
beast/asio
beast/core
beast/crypto
beast/cxx17
beast/hash
proto
resource
validators
websocket)
file(GLOB_RECURSE cursrcs src/ripple/${curdir}/*.cpp)
list(APPEND rippled_headers "${cursrcs}")
endforeach()
list(APPEND rippled_src_nonunity "${rippled_headers}")
set_property(
SOURCE ${rippled_headers}
APPEND
PROPERTY HEADER_FILE_ONLY
true)
# Doesn't work
# $<OR:$<CONFIG:Debug>,$<CONFIG:Release>>)
endif()
if (WIN32 OR is_xcode)
# Documentation sources. Only needed for IDEs.
prepend(doc_srcs
docs/
Jamfile.v2
boostbook.dtd
index.xml
main.qbk
quickref.xml
reference.xsl
source.dox)
set_property(
SOURCE ${doc_srcs}
APPEND
PROPERTY HEADER_FILE_ONLY
true)
# Doesn't work
# $<OR:$<CONFIG:Debug>,$<CONFIG:Release>>)
endif()
############################################################
add_with_props(rippled_src_all src/ripple/unity/soci.cpp
${soci_extra_includes})
if (NOT is_msvc)
set(no_unused_w -Wno-unused-function)
else()
unset(no_unused_w)
endif()
add_with_props(rippled_src_all src/ripple/unity/secp256k1.cpp
-I"${CMAKE_SOURCE_DIR}/"src/secp256k1
${no_unused_w}
)
foreach(cursrc
src/ripple/beast/unity/beast_hash_unity.cpp
src/ripple/unity/beast.cpp
src/ripple/unity/lz4.c
src/ripple/unity/protobuf.cpp
src/ripple/unity/ripple.proto.cpp
src/ripple/unity/resource.cpp)
add_with_props(rippled_src_all ${cursrc}
${rocks_db_system_header}
)
endforeach()
if (NOT is_msvc)
set(extra_props -Wno-array-bounds)
else()
unset(extra_props)
endif()
add_with_props(rippled_src_all src/sqlite/sqlite_unity.c
${extra_props})
add_with_props(rippled_src_all src/ripple/unity/ed25519_donna.c
-I"${CMAKE_SOURCE_DIR}/"src/ed25519-donna)
if (is_gcc)
set(no_init_w -Wno-maybe-uninitialized)
else()
unset(no_init_w)
endif()
add_with_props(rippled_src_all src/ripple/unity/rocksdb.cpp
-I"${CMAKE_SOURCE_DIR}/"src/rocksdb2
-I"${CMAKE_SOURCE_DIR}/"src/rocksdb2/include
-I"${CMAKE_SOURCE_DIR}/"src/snappy/snappy
-I"${CMAKE_SOURCE_DIR}/"src/snappy/config
${no_init_w} ${rocks_db_system_header})
if (NOT is_msvc)
set(no_unused_w -Wno-unused-function)
endif()
add_with_props(rippled_src_all src/ripple/unity/snappy.cpp
-I"${CMAKE_SOURCE_DIR}/"src/snappy/snappy
-I"${CMAKE_SOURCE_DIR}/"src/snappy/config
${no_unused_w})
if (APPLE AND is_clang)
list(APPEND rippled_src_all src/ripple/unity/beastobjc.mm)
endif()
list(APPEND rippled_src_unity "${rippled_src_all}")
list(APPEND rippled_src_nonunity "${rippled_src_all}")
############################################################
if (WIN32 OR is_xcode)
group_sources(src)
group_sources(docs)
group_sources(Builds)
endif()
if(unity)
add_executable(rippled ${rippled_src_unity} ${PROTO_HDRS})
add_executable(rippled_classic EXCLUDE_FROM_ALL ${rippled_src_nonunity} ${PROTO_HDRS})
set(other_target rippled_classic)
else()
add_executable(rippled ${rippled_src_nonunity} ${PROTO_HDRS})
add_executable(rippled_unity EXCLUDE_FROM_ALL ${rippled_src_unity} ${PROTO_HDRS})
set(other_target rippled_unity)
endif()
list(APPEND targets "rippled")
list(APPEND targets ${other_target})
# Not the same as EXCLUDE_FROM_ALL. Prevents Visual Studio from building the
# other_target when the user builds the solution (default when pressing <F7>)
set_property(TARGET ${other_target} PROPERTY EXCLUDE_FROM_DEFAULT_BUILD true)
find_program(
B2_EXE
NAMES b2
HINTS ${BOOST_ROOT}
PATHS ${BOOST_ROOT}
DOC "Location of the b2 build executable from Boost")
if(${B2_EXE} STREQUAL "B2_EXE-NOTFOUND")
message(WARNING
"Boost b2 executable not found. docs target will not be buildable")
elseif(NOT BOOST_ROOT)
if(Boost_INCLUDE_DIRS)
set(BOOST_ROOT ${Boost_INCLUDE_DIRS})
else()
get_filename_component(BOOST_ROOT ${B2_EXE} DIRECTORY)
endif()
endif()
# The value for BOOST_ROOT will be determined based on
# 1) The environment BOOST_ROOT
# 2) The Boost_INCLUDE_DIRS found by `get_boost`
# 3) The folder the `b2` executable is found in.
# If those checks don't yield the correct path, BOOST_ROOT
# can be defined on the cmake command line:
# cmake <path> -DBOOST_ROOT=<boost_path>
if(BOOST_ROOT)
set(B2_PARAMS "-sBOOST_ROOT=${BOOST_ROOT}")
endif()
# Find bash to help Windows avoid file association problems
find_program(
BASH_EXE
NAMES bash sh
DOC "Location of the bash shell executable"
)
if(${BASH_EXE} STREQUAL "BASH_EXE-NOTFOUND")
message(WARNING
"Unable to find bash executable. docs target may not be buildable")
set(BASH_EXE "")
endif()
add_custom_target(docs
COMMAND ${CMAKE_COMMAND} -E env "PATH=$ENV{PATH} " ${BASH_EXE} ./makeqbk.sh
COMMAND ${B2_EXE} ${B2_PARAMS}
BYPRODUCTS "${CMAKE_SOURCE_DIR}/docs/html/index.html"
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}/docs"
SOURCES "${doc_srcs}"
)
set_startup_project(rippled)
foreach(target IN LISTS targets)
target_link_libraries(${target}
${OPENSSL_LIBRARIES} ${PROTOBUF_LIBRARIES} ${SANITIZER_LIBRARIES})
link_common_libraries(${target})
endforeach()
if (NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
message(WARNING "Rippled requires a 64 bit target architecture.\n"
"The most likely cause of this warning is trying to build rippled with a 32-bit OS.")
endif()

File diff suppressed because it is too large

11
Jamroot Normal file
View File

@@ -0,0 +1,11 @@
#
# Copyright (c) 2013-2016 Vinnie Falco (vinnie dot falco at gmail dot com)
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
import boost ;
boost.use-project ;

BIN
LICENSE Normal file

Binary file not shown.

View File

@@ -1,17 +0,0 @@
ISC License
Copyright (c) 2011, Arthur Britto, David Schwartz, Jed McCaleb, Vinnie Falco, Bob Way, Eric Lombrozo, Nikolaos D. Bougalis, Howard Hinnant.
Copyright (c) 2012-2020, the XRP Ledger developers.
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

124
README.md
View File

@@ -1,71 +1,91 @@
[![codecov](https://codecov.io/gh/XRPLF/rippled/graph/badge.svg?token=WyFr5ajq3O)](https://codecov.io/gh/XRPLF/rippled)
![Ripple](/images/ripple.png)
# The XRP Ledger
# What is Ripple?
Ripple is a network of computers which use the
[Ripple consensus algorithm](https://www.youtube.com/watch?v=pj1QVb1vlC0)
to atomically settle and record
transactions on a secure distributed database, the Ripple Consensus Ledger
(RCL). Because of its distributed nature, the RCL offers transaction immutability
without a central operator. The RCL contains a built-in currency exchange and its
path-finding algorithm finds competitive exchange rates across order books
and currency pairs.
The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer nodes. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator.
### Key Features
- **Distributed**
- Direct account-to-account settlement with no central operator
- Decentralized global market for competitive FX
- **Secure**
- Transactions are cryptographically signed using ECDSA or Ed25519
- Multi-signing capabilities
- **Scalable**
  - Capacity to process the world's cross-border payments volume
- Easy access to liquidity through a competitive FX marketplace
## XRP
[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP.
## Cross-border payments
Ripple enables banks to settle cross-border payments in real-time, with
end-to-end transparency, and at lower costs. Banks can provide liquidity
for FX themselves or source it from third parties.
## rippled
The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html).
As Ripple adoption grows, so does the number of currencies and counterparties.
Liquidity providers need to maintain accounts with each counterparty for
each currency, a capital- and time-intensive endeavor that spreads liquidity
thin. Further, some transactions, such as exotic currency trades, will require
multiple trading parties, each of which layers costs onto the transaction. Thin
liquidity and many intermediary trading parties make competitive pricing
challenging.
If you are interested in running an **API Server** (including a **Full History Server**), take a look at [Clio](https://github.com/XRPLF/clio). (rippled Reporting Mode has been replaced by Clio.)
![Flow - Direct](images/flow1.png)
### Build from Source
### XRP as a Bridge Currency
Ripple can bridge even exotic currency pairs directly through XRP. Similar to
USD in today's currency market, XRP allows liquidity providers to focus on
offering competitive FX rates on fewer pairs and adding depth to order books.
Unlike USD, trading through XRP does not require bank accounts, service fees,
counterparty risk, or additional operational costs. By using XRP, liquidity
providers can specialize in certain currency corridors, reduce operational
costs, and ultimately, offer more competitive FX pricing.
* [Read the build instructions in `BUILD.md`](BUILD.md)
* If you encounter any issues, please [open an issue](https://github.com/XRPLF/rippled/issues)
![Flow - Bridged over XRP](images/flow2.png)
## Key Features of the XRP Ledger
# rippled - Ripple server
`rippled` is the reference server implementation of the Ripple
protocol. To learn more about how to build and run a `rippled`
server, visit https://ripple.com/build/rippled-setup/
- **[Censorship-Resistant Transaction Processing][]:** No single party decides which transactions succeed or fail, and no one can "roll back" a transaction after it completes. As long as those who choose to participate in the network keep it healthy, they can settle transactions in seconds.
- **[Fast, Efficient Consensus Algorithm][]:** The XRP Ledger's consensus algorithm settles transactions in 4 to 5 seconds, processing at a throughput of up to 1500 transactions per second. These properties put XRP at least an order of magnitude ahead of other top digital assets.
- **[Finite XRP Supply][]:** When the XRP Ledger began, 100 billion XRP were created, and no more XRP will ever be created. The available supply of XRP decreases slowly over time as small amounts are destroyed to pay transaction costs.
- **[Responsible Software Governance][]:** A team of full-time, world-class developers at Ripple maintain and continually improve the XRP Ledger's underlying software with contributions from the open-source community. Ripple acts as a steward for the technology and an advocate for its interests, and builds constructive relationships with governments and financial institutions worldwide.
- **[Secure, Adaptable Cryptography][]:** The XRP Ledger relies on industry standard digital signature systems like ECDSA (the same scheme used by Bitcoin) but also supports modern, efficient algorithms like Ed25519. The extensible nature of the XRP Ledger's software makes it possible to add and disable algorithms as the state of the art in cryptography advances.
- **[Modern Features for Smart Contracts][]:** Features like Escrow, Checks, and Payment Channels support cutting-edge financial applications including the [Interledger Protocol](https://interledger.org/). This toolbox of advanced features comes with safety features like a process for amending the network and separate checks against invariant constraints.
- **[On-Ledger Decentralized Exchange][]:** In addition to all the features that make XRP useful on its own, the XRP Ledger also has a fully-functional accounting system for tracking and trading obligations denominated in any way users want, and an exchange built into the protocol. The XRP Ledger can settle long, cross-currency payment paths and exchanges of multiple currencies in atomic transactions, bridging gaps of trust with XRP.
[![travis-ci.org: Build Status](https://travis-ci.org/ripple/rippled.png?branch=develop)](https://travis-ci.org/ripple/rippled)
[![codecov.io: Code Coverage](https://codecov.io/gh/ripple/rippled/branch/develop/graph/badge.svg)](https://codecov.io/gh/ripple/rippled)
[Censorship-Resistant Transaction Processing]: https://xrpl.org/xrp-ledger-overview.html#censorship-resistant-transaction-processing
[Fast, Efficient Consensus Algorithm]: https://xrpl.org/xrp-ledger-overview.html#fast-efficient-consensus-algorithm
[Finite XRP Supply]: https://xrpl.org/xrp-ledger-overview.html#finite-xrp-supply
[Responsible Software Governance]: https://xrpl.org/xrp-ledger-overview.html#responsible-software-governance
[Secure, Adaptable Cryptography]: https://xrpl.org/xrp-ledger-overview.html#secure-adaptable-cryptography
[Modern Features for Smart Contracts]: https://xrpl.org/xrp-ledger-overview.html#modern-features-for-smart-contracts
[On-Ledger Decentralized Exchange]: https://xrpl.org/xrp-ledger-overview.html#on-ledger-decentralized-exchange
### License
`rippled` is open source and permissively licensed under the
ISC license. See the LICENSE file for more details.
#### Repository Contents
## Source Code
| Folder | Contents |
|---------|----------|
| ./bin | Scripts and data files for Ripple integrators. |
| ./build | Intermediate and final build outputs. |
| ./Builds| Platform or IDE-specific project files. |
| ./doc | Documentation and example configuration files. |
| ./src | Source code. |
Here are some good places to start learning the source code:
Some of the directories under `src` are external repositories inlined via
git-subtree. See the corresponding README for more details.
- Read the markdown files in the source tree: `src/ripple/**/*.md`.
- Read [the levelization document](./Builds/levelization) to get an idea of the internal dependency graph.
- In the big picture, the `main` function constructs an `ApplicationImp` object, which implements the `Application` virtual interface. Almost every component in the application takes an `Application&` parameter in its constructor, typically named `app` and stored as a member variable `app_`. This allows most components to depend on any other component; see the sketch below.
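For illustration, here is a minimal, self-contained sketch of that constructor-injection pattern. The `Application` interface here is a simplified stand-in, and `ExampleComponent`/`ExampleApp` are hypothetical names, not real rippled classes:

```cpp
#include <iostream>
#include <string>

// Simplified stand-in for rippled's Application virtual interface,
// which ApplicationImp implements in the real code base.
class Application
{
public:
    virtual ~Application() = default;
    virtual std::string name() const = 0;
};

// A hypothetical component following the convention described above:
// it takes an Application& in its constructor and keeps it as app_,
// giving it access to other components through that interface.
class ExampleComponent
{
public:
    explicit ExampleComponent(Application& app) : app_(app) {}

    void report() const
    {
        std::cout << "constructed by " << app_.name() << '\n';
    }

private:
    Application& app_;
};

// Minimal concrete application to wire the component together.
class ExampleApp : public Application
{
public:
    std::string name() const override { return "ExampleApp"; }
};

int main()
{
    ExampleApp app;
    ExampleComponent component(app);  // dependency injected via Application&
    component.report();
}
```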
## For more information:
### Repository Contents
* [Ripple Knowledge Center](https://ripple.com/learn/)
* [Ripple Developer Center](https://ripple.com/build/)
* [Ripple Whitepapers & Reports](https://ripple.com/whitepapers-reports/)
* [Ripple Consensus Whitepaper](https://ripple.com/consensus-whitepaper/)
* [Ripple Solutions Guide](https://ripple.com/files/ripple_solutions_guide.pdf)
| Folder | Contents |
|:-----------|:-------------------------------------------------|
| `./bin` | Scripts and data files for Ripple integrators. |
| `./Builds` | Platform-specific guides for building `rippled`. |
| `./docs` | Source documentation files and doxygen config. |
| `./cfg` | Example configuration files. |
| `./src` | Source code. |
To learn about how Ripple is transforming global payments visit
[https://ripple.com/contact/](https://ripple.com/contact/)
Some of the directories under `src` are external repositories included using
git-subtree. See those directories' README files for more details.
- - -
Copyright © 2015, Ripple Labs. All rights reserved.
## Additional Documentation
* [XRP Ledger Dev Portal](https://xrpl.org/)
* [Setup and Installation](https://xrpl.org/install-rippled.html)
* [Source Documentation (Doxygen)](https://xrplf.github.io/rippled/)
## See Also
* [Clio API Server for the XRP Ledger](https://github.com/XRPLF/clio)
* [Mailing List for Release Announcements](https://groups.google.com/g/ripple-server)
* [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi)
Portions of this document, including but not limited to the Ripple logo,
images, and image templates, are the property of Ripple Labs and cannot be
copied or used without permission.

File diff suppressed because it is too large

1283
SConstruct Normal file

File diff suppressed because it is too large

View File

@@ -1,149 +0,0 @@
### Operating an XRP Ledger server securely
For more details on operating an XRP Ledger server securely, please visit https://xrpl.org/manage-the-rippled-server.html.
# Security Policy
## Supported Versions
Software constantly evolves. In order to focus resources, we generally only accept vulnerability reports that affect recent and current versions of the software. We always accept reports for issues present in the **master**, **release** or **develop** branches, and in proposed, [open pull requests](https://github.com/ripple/rippled/pulls).
## Identifying and Reporting Vulnerabilities
We take security seriously and we do our best to ensure that all our releases are bug-free. But we aren't perfect and sometimes things will slip through.
### Responsible Investigation
We urge you to examine our code carefully and responsibly, and to disclose any issues that you identify in a responsible fashion.
Responsible investigation includes, but isn't limited to, the following:
- Not performing tests on the main network. If testing is necessary, use the [Testnet or Devnet](https://xrpl.org/xrp-testnet-faucet.html).
- Not targeting physical security measures, or attempting to use social engineering, spam, distributed denial of service (DDoS) attacks, etc.
- Investigating bugs in a way that makes a reasonable, good faith effort not to be disruptive or harmful to the XRP Ledger and the broader ecosystem.
### Responsible Disclosure
If you discover a vulnerability or potential threat, or if you _think_
you have, please reach out by dropping an email using the contact
information below.
Your report should include the following:
- Your contact information (typically, an email address);
- The description of the vulnerability;
- The attack scenario (if any);
- The steps to reproduce the vulnerability;
- Any other relevant details or artifacts, including code, scripts or patches.
In your email, please describe the issue or potential threat. If possible, include a "repro" (code that can reproduce the issue) or describe the best way to reproduce and replicate the issue. Please make your report as detailed and comprehensive as possible.
For more information on responsible disclosure, please read this [Wikipedia article](https://en.wikipedia.org/wiki/Responsible_disclosure).
## Report Handling Process
Please report the bug directly to us and limit further disclosure. If you want to prove that you knew the bug as of a given time, consider using a cryptographic precommitment: hash the content of your report and publish the hash on a medium of your choice (e.g. on Twitter or as a memo in a transaction) as "proof" that you had written the text at a given point in time.
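As a sketch of such a precommitment (assuming OpenSSL, which `rippled` already links against; the report text below is a placeholder), you could hash the report with SHA-256 and publish only the hex digest:

```cpp
// Build (assumption): g++ precommit.cpp -lcrypto
#include <openssl/sha.h>

#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

// Compute the SHA-256 digest of the report text and return it as hex.
// Publishing only this digest commits you to the report's contents
// without revealing them.
std::string precommitment(std::string const& report)
{
    unsigned char digest[SHA256_DIGEST_LENGTH];
    SHA256(
        reinterpret_cast<unsigned char const*>(report.data()),
        report.size(),
        digest);
    std::ostringstream hex;
    for (unsigned char byte : digest)
        hex << std::hex << std::setw(2) << std::setfill('0')
            << static_cast<int>(byte);
    return hex.str();
}

int main()
{
    std::cout << precommitment("full text of your report here") << '\n';
}
```

Later, revealing the report text lets anyone recompute the digest and confirm it matches what you published.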
Once we receive a report, we:
1. Assign two people to independently evaluate the report;
2. Consider their recommendations;
3. If action is necessary, formulate a plan to address the issue;
4. Communicate privately with the reporter to explain our plan.
5. Prepare, test and release a version which fixes the issue; and
6. Announce the vulnerability publicly.
We will triage and respond to your disclosure within 24 hours. Beyond that, we will work to analyze the issue in more detail, and to formulate, develop and test a fix.
While we commit to responding within 24 hours of your initial report with our triage assessment, we cannot guarantee a response time for the remaining steps. We will communicate with you throughout this process, letting you know where we are and keeping you updated on the timeframe.
## Bug Bounty Program
[Ripple](https://ripple.com) is generously sponsoring a bug bounty program for vulnerabilities in [`rippled`](https://github.com/XRPLF/rippled) (and other related projects, like [`xrpl.js`](https://github.com/XRPLF/xrpl.js), [`xrpl-py`](https://github.com/XRPLF/xrpl-py), [`xrpl4j`](https://github.com/XRPLF/xrpl4j)).
This program allows us to recognize and reward individuals or groups that identify and report bugs. In summary, in order to qualify for a bounty, the bug must be:
1. **In scope**. Only bugs in software under the scope of the program qualify. Currently, that means `rippled`, `xrpl.js`, `xrpl-py`, `xrpl4j`.
2. **Relevant**. A security issue, posing a danger to user funds, privacy, or the operation of the XRP Ledger.
3. **Original and previously unknown**. Bugs that are already known and discussed in public do not qualify. Previously reported bugs, even if publicly unknown, are not eligible.
4. **Specific**. We welcome general security advice or recommendations, but we cannot pay bounties for that.
5. **Fixable**. There has to be something we can do to permanently fix the problem. Note that bugs in other people's software may still qualify in some cases. For example, if you find a bug in a library that we use which can compromise the security of software that is in scope and we can get it fixed, you may qualify for a bounty.
6. **Unused**. If you use the exploit to attack the XRP Ledger, you do not qualify for a bounty. If you report a vulnerability used in an ongoing or past attack and there is specific, concrete evidence that suggests you are the attacker we reserve the right not to pay a bounty.
The amount paid varies dramatically. Vulnerabilities that are harmless on their own, but could form part of a critical exploit will usually receive a bounty. Full-blown exploits can receive much higher bounties. Please don't hold back partial vulnerabilities while trying to construct a full-blown exploit. We will pay a bounty to anyone who reports a complete chain of vulnerabilities even if they have reported each component of the exploit separately and those vulnerabilities have been fixed in the meantime. However, to qualify for the full bounty, you must have been the first to report each of the partial exploits.
### Contacting Us
To report a qualifying bug, please send a detailed report to:
|Email Address|bugs@ripple.com |
|:-----------:|:----------------------------------------------------|
|Short Key ID | `0xC57929BE` |
|Long Key ID | `0xCD49A0AFC57929BE` |
|Fingerprint | `24E6 3B02 37E0 FA9C 5E96 8974 CD49 A0AF C579 29BE` |
The full PGP key for this address, which is also available on several key servers (e.g. on [keyserver.ubuntu.com](https://keyserver.ubuntu.com)), is:
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFUwGHYBEAC0wpGpBPkd8W1UdQjg9+cEFzeIEJRaoZoeuJD8mofwI5Ejnjdt
kCpUYEDal0ygkKobu8SzOoATcDl18iCrScX39VpTm96vISFZMhmOryYCIp4QLJNN
4HKc2ZdBj6W4igNi6vj5Qo6JMyGpLY2mz4CZskbt0TNuUxWrGood+UrCzpY8x7/N
a93fcvNw+prgCr0rCH3hAPmAFfsOBbtGzNnmq7xf3jg5r4Z4sDiNIF1X1y53DAfV
rWDx49IKsuCEJfPMp1MnBSvDvLaQ2hKXs+cOpx1BCZgHn3skouEUxxgqbtTzBLt1
xXpmuijsaltWngPnGO7mOAzbpZSdBm82/Emrk9bPMuD0QaLQjWr7HkTSUs6ZsKt4
7CLPdWqxyY/QVw9UaxeHEtWGQGMIQGgVJGh1fjtUr5O1sC9z9jXcQ0HuIHnRCTls
GP7hklJmfH5V4SyAJQ06/hLuEhUJ7dn+BlqCsT0tLmYTgZYNzNcLHcqBFMEZHvHw
9GENMx/tDXgajKql4bJnzuTK0iGU/YepanANLd1JHECJ4jzTtmKOus9SOGlB2/l1
0t0ADDYAS3eqOdOcUvo9ElSLCI5vSVHhShSte/n2FMWU+kMUboTUisEG8CgQnrng
g2CvvQvqDkeOtZeqMcC7HdiZS0q3LJUWtwA/ViwxrVlBDCxiTUXCotyBWwARAQAB
tDBSaXBwbGUgTGFicyBCdWcgQm91bnR5IFByb2dyYW0gPGJ1Z3NAcmlwcGxlLmNv
bT6JAjcEEwEKACEFAlUwGHYCGwMFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ
zUmgr8V5Kb6R0g//SwY/mVJY59k87iL26/KayauSoOcz7xjcST26l4ZHVVX85gOY
HYZl8k0+m8X3zxeYm9a3QAoAml8sfoaFRFQP8ynnefRrLUPaZ2MjbJ0SACMwZNef
T6o7Mi8LBAaiNZdYVyIfX1oM6YXtqYkuJdav6ZCyvVYqc9OvMJPY2ZzJYuI/ZtvQ
/lTndxCeg9ALNX/iezOLGdfMpf4HuIFVwcPPlwGi+HDlB9/bggDEHC8z434SXVFc
aQatXAPcDkjMUweU7y0CZtYEj00HITd4pSX6MqGiHrxlDZTqinCOPs1Ieqp7qufs
MzlM6irLGucxj1+wa16ieyYvEtGaPIsksUKkywx0O7cf8N2qKg+eIkUk6O0Uc6eO
CszizmiXIXy4O6OiLlVHGKkXHMSW9Nwe9GE95O8G9WR8OZCEuDv+mHPAutO+IjdP
PDAAUvy+3XnkceO+HGWRpVvJZfFP2YH4A33InFL5yqlJmSoR/yVingGLxk55bZDM
+HYGR3VeMb8Xj1rf/02qERsZyccMCFdAvKDbTwmvglyHdVLu5sPmktxbBYiemfyJ
qxMxmYXCc9S0hWrWZW7edktBa9NpE58z1mx+hRIrDNbS2sDHrib9PULYCySyVYcF
P+PWEe1CAS5jqkR2ker5td2/pHNnJIycynBEs7l6zbc9fu+nktFJz0q2B+GJAhwE
EAEKAAYFAlUwGaQACgkQ+tiY1qQ2QkjMFw//f2hNY3BPNe+1qbhzumMDCnbTnGif
kLuAGl9OKt81VHG1f6RnaGiLpR696+6Ja45KzH15cQ5JJl5Bgs1YkR/noTGX8IAD
c70eNwiFu8JXTaaeeJrsmFkF9Tueufb364risYkvPP8tNUD3InBFEZT3WN7JKwix
coD4/BwekUwOZVDd/uCFEyhlhZsROxdKNisNo3VtAq2s+3tIBAmTrriFUl0K+ZC5
zgavcpnPN57zMtW9aK+VO3wXqAKYLYmtgxkVzSLUZt2M7JuwOaAdyuYWAneKZPCu
1AXkmyo+d84sd5mZaKOr5xArAFiNMWPUcZL4rkS1Fq4dKtGAqzzR7a7hWtA5o27T
6vynuxZ1n0PPh0er2O/zF4znIjm5RhTlfjp/VmhZdQfpulFEQ/dMxxGkQ9z5IYbX
mTlSDbCSb+FMsanRBJ7Drp5EmBIudVGY6SHI5Re1RQiEh7GoDfUMUwZO+TVDII5R
Ra7WyuimYleJgDo/+7HyfuIyGDaUCVj6pwVtYtYIdOI3tTw1R1Mr0V8yaNVnJghL
CHcEJQL+YHSmiMM3ySil3O6tm1By6lFz8bVe/rgG/5uklQrnjMR37jYboi1orCC4
yeIoQeV0ItlxeTyBwYIV/o1DBNxDevTZvJabC93WiGLw2XFjpZ0q/9+zI2rJUZJh
qxmKP+D4e27lCI65Ag0EVTAYdgEQAMvttYNqeRNBRpSX8fk45WVIV8Fb21fWdwk6
2SkZnJURbiC0LxQnOi7wrtii7DeFZtwM2kFHihS1VHekBnIKKZQSgGoKuFAQMGyu
a426H4ZsSmA9Ufd7kRbvdtEcp7/RTAanhrSL4lkBhaKJrXlxBJ27o3nd7/rh7r3a
OszbPY6DJ5bWClX3KooPTDl/RF2lHn+fweFk58UvuunHIyo4BWJUdilSXIjLun+P
Qaik4ZAsZVwNhdNz05d+vtai4AwbYoO7adboMLRkYaXSQwGytkm+fM6r7OpXHYuS
cR4zB/OK5hxCVEpWfiwN71N2NMvnEMaWd/9uhqxJzyvYgkVUXV9274TUe16pzXnW
ZLfmitjwc91e7mJBBfKNenDdhaLEIlDRwKTLj7k58f9srpMnyZFacntu5pUMNblB
cjXwWxz5ZaQikLnKYhIvrIEwtWPyjqOzNXNvYfZamve/LJ8HmWGCKao3QHoAIDvB
9XBxrDyTJDpxbog6Qu4SY8AdgVlan6c/PsLDc7EUegeYiNTzsOK+eq3G5/E92eIu
TsUXlciypFcRm1q8vLRr+HYYe2mJDo4GetB1zLkAFBcYJm/x9iJQbu0hn5NxJvZO
R0Y5nOJQdyi+muJzKYwhkuzaOlswzqVXkq/7+QCjg7QsycdcwDjiQh3OrsgXHrwl
M7gyafL9ABEBAAGJAh8EGAEKAAkFAlUwGHYCGwwACgkQzUmgr8V5Kb50BxAAhj9T
TwmNrgRldTHszj+Qc+v8RWqV6j+R+zc0cn5XlUa6XFaXI1OFFg71H4dhCPEiYeN0
IrnocyMNvCol+eKIlPKbPTmoixjQ4udPTR1DC1Bx1MyW5FqOrsgBl5t0e1VwEViM
NspSStxu5Hsr6oWz2GD48lXZWJOgoL1RLs+uxjcyjySD/em2fOKASwchYmI+ezRv
plfhAFIMKTSCN2pgVTEOaaz13M0U+MoprThqF1LWzkGkkC7n/1V1f5tn83BWiagG
2N2Q4tHLfyouzMUKnX28kQ9sXfxwmYb2sA9FNIgxy+TdKU2ofLxivoWT8zS189z/
Yj9fErmiMjns2FzEDX+bipAw55X4D/RsaFgC+2x2PDbxeQh6JalRA2Wjq32Ouubx
u+I4QhEDJIcVwt9x6LPDuos1F+M5QW0AiUhKrZJ17UrxOtaquh/nPUL9T3l2qPUn
1ChrZEEEhHO6vA8+jn0+cV9n5xEz30Str9iHnDQ5QyR5LyV4UBPgTdWyQzNVKA69
KsSr9lbHEtQFRzGuBKwt6UlSFv9vPWWJkJit5XDKAlcKuGXj0J8OlltToocGElkF
+gEBZfoOWi/IBjRLrFW2cT3p36DTR5O1Ud/1DLnWRqgWNBLrbs2/KMKE6EnHttyD
7Tz8SQkuxltX/yBXMV3Ddy0t6nWV2SZEfuxJAQI=
=spg4
-----END PGP PUBLIC KEY BLOCK-----
```

134
appveyor.yml Normal file
View File

@@ -0,0 +1,134 @@
# Set environment variables.
environment:
PYTHON: C:/Python27-x64
# We bundle up protoc.exe and only the parts of boost and openssl we need so
# that it's a small download. We also use appveyor's free cache, avoiding fees
# downloading from S3 each time.
# TODO: script to create this package.
RIPPLED_DEPS_PATH: rippled_deps15.02
RIPPLED_DEPS_URL: https://ripple.github.io/Downloads/appveyor/%RIPPLED_DEPS_PATH%.zip
# Other dependencies we just download each time.
PIP_PATH: get-pip.py
PIP_URL: https://bootstrap.pypa.io/%PIP_PATH%
# The % in this URL messes up variable substitution, so any updates will
# need to update both PYWIN32_PATH and PYWIN32_URL
PYWIN32_PATH: pywin32-220.win-amd64-py2.7.exe
PYWIN32_URL: https://downloads.sourceforge.net/project/pywin32/pywin32/Build%20220/pywin32-220.win-amd64-py2.7.exe
# Scons honours these environment variables, setting the include/lib paths.
BOOST_ROOT: C:/%RIPPLED_DEPS_PATH%/boost
OPENSSL_ROOT: C:/%RIPPLED_DEPS_PATH%/openssl
matrix:
# This build works, but our current Appveyor config runs matrix builds
# sequentially, and the one build is already slow enough.
# - build: scons
# target: msvc.debug
- build: cmake
target: msvc.debug
buildconfig: Debug
os: Visual Studio 2015
# At the end of each successful build we cache this directory.
# https://www.appveyor.com/docs/build-cache/
# Resulting archive should not exceed 100 MB.
cache:
- 'C:\%RIPPLED_DEPS_PATH%'
- '%PIP_PATH%'
- '%PYWIN32_PATH%'
# This means we'll download a zip of the branch we want, rather than the full
# history.
shallow_clone: true
install:
# We want easy_install, python and protoc.exe on PATH.
- SET PATH=%PYTHON%;%PYTHON%/Scripts;C:/%RIPPLED_DEPS_PATH%;%PATH%
# `ps` prefix means the command is executed by powershell.
- ps: |
if ($env:build -eq "scons") {
if(-not(Test-Path $env:PIP_PATH)) {
echo "Download from $env:PIP_URL"
Start-FileDownload $env:PIP_URL
}
if(-not(Test-Path $env:PYWIN32_PATH)) {
echo "Download from $env:PYWIN32_URL"
Start-FileDownload $env:PYWIN32_URL
}
}
- bin/ci/windows/install-dependencies.bat
# Download dependencies if appveyor didn't restore them from the cache.
# Use 7zip to unzip.
- ps: |
if (-not(Test-Path "C:/$env:RIPPLED_DEPS_PATH")) {
echo "Download from $env:RIPPLED_DEPS_URL"
Start-FileDownload "$env:RIPPLED_DEPS_URL"
7z x "$($env:RIPPLED_DEPS_PATH).zip" -oC:\ -y > $null
if ($LastExitCode -ne 0) { throw "7z failed" }
}
# Newer DEPS include a versions file.
# Dump it so we can verify correct behavior.
- ps: |
if (Test-Path "C:/$env:RIPPLED_DEPS_PATH/versions.txt") {
cat "C:/$env:RIPPLED_DEPS_PATH/versions.txt"
}
# TODO: This is giving me grief
# artifacts:
# # Save rippled.exe in the cloud after each build.
# - path: "build\\rippled.exe"
build_script:
# We set the environment variables needed to put compilers on the PATH.
- '"%VS140COMNTOOLS%../../VC/vcvarsall.bat" x86_amd64'
# Show which version of the compiler we are using.
- cl
- ps: |
if ($env:build -eq "scons") {
# Build with scons
scons $env:target -j$env:NUMBER_OF_PROCESSORS
if ($LastExitCode -ne 0) { throw "scons build failed" }
}
else
{
# Build with cmake
cmake --version
$cmake_target="$($env:target).ci"
"$cmake_target"
New-Item -ItemType Directory -Force -Path "build/$cmake_target"
Push-Location "build/$cmake_target"
cmake -G"Visual Studio 14 2015 Win64" -Dtarget="$cmake_target" ../..
if ($LastExitCode -ne 0) { throw "CMake failed" }
cmake --build . --config $env:buildconfig -- -m
if ($LastExitCode -ne 0) { throw "CMake build failed" }
Pop-Location
}
after_build:
- ps: |
if ($env:build -eq "scons") {
cp build/$($env:target)/rippled.exe build
ls build
$exe="build/rippled"
}
else
{
$exe="build/$cmake_target/$env:buildconfig/rippled"
}
"Exe is at $exe"
test_script:
- ps: |
& {
# Run the rippled unit tests
& $exe --unittest
# https://connect.microsoft.com/PowerShell/feedback/details/751703/option-to-stop-script-if-command-line-exe-fails
if ($LastExitCode -ne 0) { throw "Unit tests failed" }
}

1
bin/LT Symbolic link
View File

@@ -0,0 +1 @@
python/LedgerTool.py

92
bin/ci/ubuntu/build-and-test.sh Executable file
View File

@@ -0,0 +1,92 @@
#!/bin/bash -u
# We use set -e and bash with -u to bail on the first non-zero exit code of any
# process launched or upon any unbound variable.
# We use set -x to print commands before running them to help with
# debugging.
set -ex
__dirname=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
echo "using CC: $CC"
echo "using TARGET: $TARGET"
# Ensure APP defaults to rippled if it's not set.
: ${APP:=rippled}
if [[ ${BUILD:-scons} == "cmake" ]]; then
echo "cmake building ${APP}"
CMAKE_TARGET=$CC.$TARGET
if [[ ${CI:-} == true ]]; then
CMAKE_TARGET=$CMAKE_TARGET.ci
fi
mkdir -p "build/${CMAKE_TARGET}"
pushd "build/${CMAKE_TARGET}"
cmake ../.. -Dtarget=$CMAKE_TARGET
cmake --build . -- -j${NUM_PROCESSORS:-2}
popd
export APP_PATH="$PWD/build/${CMAKE_TARGET}/${APP}"
echo "using APP_PATH: $APP_PATH"
else
export APP_PATH="$PWD/build/$CC.$TARGET/${APP}"
echo "using APP_PATH: $APP_PATH"
# Make sure vcxproj is up to date
scons vcxproj
git diff --exit-code
# $CC will be either `clang` or `gcc`
# http://docs.travis-ci.com/user/migrating-from-legacy/?utm_source=legacy-notice&utm_medium=banner&utm_campaign=legacy-upgrade
# indicates that 2 cores are available to containers.
scons -j${NUM_PROCESSORS:-2} $CC.$TARGET
fi
# We can be sure we're using the build/$CC.$TARGET variant
# (-f so never err)
rm -f build/${APP}
# See what we've actually built
ldd $APP_PATH
if [[ ${APP} == "rippled" ]]; then
export APP_ARGS="--unittest"
# Only report on src/ripple files
export LCOV_FILES="*/src/ripple/*"
# Nothing to explicitly exclude
export LCOV_EXCLUDE_FILES="LCOV_NO_EXCLUDE"
else
: ${APP_ARGS:=}
: ${LCOV_FILES:="*/src/*"}
# Don't exclude anything
: ${LCOV_EXCLUDE_FILES:="LCOV_NO_EXCLUDE"}
fi
if [[ $TARGET == "coverage" ]]; then
export PATH=$PATH:$LCOV_ROOT/usr/bin
# Create baseline coverage data file
lcov --no-external -c -i -d . -o baseline.info
fi
# Execute unit tests under gdb, printing a call stack
# if we get a crash.
gdb -return-child-result -quiet -batch \
-ex "set env MALLOC_CHECK_=3" \
-ex "set print thread-events off" \
-ex run \
-ex "thread apply all backtrace full" \
-ex "quit" \
--args $APP_PATH --unittest
if [[ $TARGET == "coverage" ]]; then
# Create test coverage data file
lcov --no-external -c -d . -o tests.info
# Combine baseline and test coverage data
lcov -a baseline.info -a tests.info -o lcov-all.info
# Included files
lcov -e "lcov-all.info" "${LCOV_FILES}" -o lcov.pre.info
# Excluded files
lcov --remove lcov.pre.info "${LCOV_EXCLUDE_FILES}" -o lcov.info
# Push the results (lcov.info) to codecov
codecov -X gcov # don't even try and look for .gcov files ;)
fi

View File

@@ -0,0 +1,67 @@
#!/bin/bash -u
# Exit if anything fails. Echo commands to aid debugging.
set -ex
# Target working dir - defaults to current dir.
# Can be set from caller, or in the first parameter
TWD=$( cd ${TWD:-${1:-${PWD:-$( pwd )}}}; pwd )
echo "Target path is: $TWD"
# Override gcc version to $GCC_VER.
# Put an appropriate symlink at the front of the path.
mkdir -v $HOME/bin
for g in gcc g++ gcov gcc-ar gcc-nm gcc-ranlib
do
test -x $( type -p ${g}-$GCC_VER )
ln -sv $(type -p ${g}-$GCC_VER) $HOME/bin/${g}
done
if [[ -n ${CLANG_VER:-} ]]; then
# There are cases where the directory exists, but the exe is not available.
# Use this workaround for now.
if [[ ! -x ${TWD}/llvm-${LLVM_VERSION}/bin/llvm-config && -d ${TWD}/llvm-${LLVM_VERSION} ]]; then
rm -fr ${TWD}/llvm-${LLVM_VERSION}
fi
if [[ ! -d ${TWD}/llvm-${LLVM_VERSION} ]]; then
mkdir ${TWD}/llvm-${LLVM_VERSION}
LLVM_URL="http://llvm.org/releases/${LLVM_VERSION}/clang+llvm-${LLVM_VERSION}-x86_64-linux-gnu-ubuntu-14.04.tar.xz"
wget -O - ${LLVM_URL} | tar -Jxvf - --strip 1 -C ${TWD}/llvm-${LLVM_VERSION}
fi
${TWD}/llvm-${LLVM_VERSION}/bin/llvm-config --version;
export LLVM_CONFIG="${TWD}/llvm-${LLVM_VERSION}/bin/llvm-config";
fi
if [[ ${BUILD:-} == cmake ]]; then
# There are cases where the directory exists, but the exe is not available.
# Use this workaround for now.
if [[ ! -x ${TWD}/cmake/bin/cmake && -d ${TWD}/cmake ]]; then
rm -fr ${TWD}/cmake
fi
if [[ ! -d ${TWD}/cmake ]]; then
CMAKE_URL="https://www.cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.tar.gz"
wget --version
# wget version 1.13.4 thinks this certificate is invalid, even though it's fine.
# "ERROR: no certificate subject alternative name matches"
# See also: https://github.com/travis-ci/travis-ci/issues/5059
mkdir ${TWD}/cmake &&
wget -O - --no-check-certificate ${CMAKE_URL} | tar --strip-components=1 -xz -C ${TWD}/cmake
cmake --version
fi
fi
# What versions are we ACTUALLY running?
if [ -x $HOME/bin/g++ ]; then
$HOME/bin/g++ -v
fi
pip install --user https://github.com/codecov/codecov-python/archive/master.zip
bash bin/sh/install-boost.sh
# Install lcov
# Download the archive
wget https://github.com/linux-test-project/lcov/releases/download/v1.12/lcov-1.12.tar.gz
# Extract to ~/lcov-1.12
tar xfvz lcov-1.12.tar.gz -C $HOME
# Set install path
mkdir -p $LCOV_ROOT
cd $HOME/lcov-1.12 && make install PREFIX=$LCOV_ROOT

View File

@@ -0,0 +1,13 @@
if "%build%" == "scons" (
rem Installing pip will install setuptools/easy_install.
python "%PIP_PATH%"
rem Pip has some problems installing scons on windows so we use easy install.
rem - easy_install scons
rem Workaround
easy_install https://pypi.python.org/packages/source/S/SCons/scons-2.5.0.tar.gz#md5=bda5530a70a41a7831d83c8b191c021e
rem Scons has problems with parallel builds on windows without pywin32.
easy_install "%PYWIN32_PATH%"
rem (easy_install can do headless installs of .exe wizards)
)

84
bin/getInfoRippled.sh Normal file
View File

@@ -0,0 +1,84 @@
#!/usr/bin/env bash
rippled_exe=/opt/ripple/bin/rippled
conf_file=/etc/opt/ripple/rippled.cfg
while getopts ":e:c:" opt; do
case $opt in
e)
rippled_exe=${OPTARG}
;;
c)
conf_file=${OPTARG}
;;
\?)
echo "Invalid option: -$OPTARG"
esac
done
tmp_loc=$(mktemp -d --tmpdir ripple_info.XXXX)
cd /tmp
chmod 751 ripple_info.*
cd ~
echo ${tmp_loc}
cleaned_conf=${tmp_loc}/cleaned_rippled_cfg.txt
if [[ -f ${conf_file} ]]
then
db=$(sed -r -e 's/\<s[a-zA-Z0-9]{28}\>/secretsecretsecretsecretmaybe/g' ${conf_file} |\
awk -v OUT_FILE=${cleaned_conf} '
BEGIN {skip=0; db_path="";print > OUT_FILE}
/^\[validation_seed\]/ {skip=1; next}
/^\[node_seed\]/ {skip=1; next}
/^\[.*\]/ {skip=0}
skip==1 {next}
save==1 {save=0;db_path=$0}
/^\[database_path\]/ {save=1}
{print >> OUT_FILE}
END {print db_path}
')
fi
echo "database_path: ${db}"
df ${db} > ${tmp_loc}/db_path_df.txt
echo
# Send output from this script to a log file
## this captures any messages
## or errors from the script itself
log_file=${tmp_loc}/get_info.log
exec 3>&1 1>>${log_file} 2>&1
## Send all stdout files to /tmp
if [[ -x ${rippled_exe} ]]
then
pgrep rippled && \
${rippled_exe} --conf ${conf_file} \
-- server_info > ${tmp_loc}/server_info.txt
fi
df -h > ${tmp_loc}/free_disk_space.txt
cat /proc/meminfo > ${tmp_loc}/amount_mem.txt
cat /proc/swaps > ${tmp_loc}/swap_space.txt
ulimit -a > ${tmp_loc}/reported_current_limits.txt
for dev_path in $(df | awk '$1 ~ /^\/dev\// {print $1}'); do
# strip numbers from end and remove '/dev/'
dev=$(basename ${dev_path%%[0-9]})
if [[ "$(cat /sys/block/${dev}/queue/rotational)" = 0 ]]
then
echo "${dev} : SSD" >> ${tmp_loc}/is_ssd.txt
else
echo "${dev} : NO SSD" >> ${tmp_loc}/is_ssd.txt
fi
done
pushd ${tmp_loc}
tar -czvf info-package.tar.gz *.txt *.log
popd
echo "Use the following command on your local machine to download from your rippled instance: scp <remote_rippled_username>@<remote_host>:${tmp_loc}/info-package.tar.gz <path/to/local_machine/directory>"| tee /dev/fd/3

View File

@@ -1,150 +0,0 @@
#!/usr/bin/env bash
# This script generates information about your rippled installation
# and system. It can be used to help debug issues that you may face
# in your installation. While this script endeavors not to display any
# sensitive information, it is recommended that you read the output
# before sharing it with any third parties.
rippled_exe=/opt/ripple/bin/rippled
conf_file=/etc/opt/ripple/rippled.cfg
while getopts ":e:c:" opt; do
case $opt in
e)
rippled_exe=${OPTARG}
;;
c)
conf_file=${OPTARG}
;;
\?)
echo "Invalid option: -$OPTARG"
exit -1
esac
done
tmp_loc=$(mktemp -d --tmpdir ripple_info.XXXXX)
chmod 751 ${tmp_loc}
awk_prog=${tmp_loc}/cfg.awk
summary_out=${tmp_loc}/rippled_info.md
printf "# rippled report info\n\n> generated at %s\n" "$(date -R)" > ${summary_out}
function log_section {
printf "\n## %s\n" "$*" >> ${summary_out}
while read -r l; do
echo " $l" >> ${summary_out}
done </dev/stdin
}
function join_by {
local IFS="$1"; shift; echo "$*";
}
if [[ -f ${conf_file} ]] ; then
exclude=( ips ips_fixed node_seed validation_seed validator_token )
cleaned_conf=${tmp_loc}/cleaned_rippled_cfg.txt
cat << 'EOP' >> ${awk_prog}
BEGIN {FS="[[:space:]]*=[[:space:]]*"; skip=0; db_path=""; print > OUT_FILE; split(exl,exa,"|")}
/^#/ {next}
save==2 && /^[[:space:]]*$/ {next}
/^\[.+\]$/ {
section=tolower(gensub(/^\[[[:space:]]*([a-zA-Z_]+)[[:space:]]*\]$/, "\\1", "g"))
skip = 0
for (i in exa) {
if (section == exa[i])
skip = 1
}
if (section == "database_path")
save = 1
}
skip==1 {next}
save==2 {save=0; db_path=$0}
save==1 {save=2}
$1 ~ /password/ {$0=$1"=<redacted>"}
{print >> OUT_FILE}
END {print db_path}
EOP
db=$(\
sed -r -e 's/\<s[[:alnum:]]{28}\>/<redactedsecret>/g;s/^[[:space:]]*//;s/[[:space:]]*$//' ${conf_file} |\
awk -v OUT_FILE=${cleaned_conf} -v exl="$(join_by '|' "${exclude[@]}")" -f ${awk_prog})
rm ${awk_prog}
cat ${cleaned_conf} | log_section "cleaned config file"
rm ${cleaned_conf}
echo "${db}" | log_section "database path"
df ${db} | log_section "df: database"
fi
# Send output from this script to a log file
## this captures any messages
## or errors from the script itself
log_file=${tmp_loc}/get_info.log
exec 3>&1 1>>${log_file} 2>&1
## Send all stdout files to /tmp
if [[ -x ${rippled_exe} ]] ; then
pgrep rippled && \
${rippled_exe} --conf ${conf_file} \
-- server_info | log_section "server info"
fi
cat /proc/meminfo | log_section "meminfo"
cat /proc/swaps | log_section "swap space"
ulimit -a | log_section "ulimit"
if command -v lshw >/dev/null 2>&1 ; then
lshw 2>/dev/null | log_section "hardware info"
else
lscpu > ${tmp_loc}/hw_info.txt
hwinfo >> ${tmp_loc}/hw_info.txt
lspci >> ${tmp_loc}/hw_info.txt
lsblk >> ${tmp_loc}/hw_info.txt
cat ${tmp_loc}/hw_info.txt | log_section "hardware info"
rm ${tmp_loc}/hw_info.txt
fi
if command -v iostat >/dev/null 2>&1 ; then
iostat -t -d -x 2 6 | log_section "iostat"
fi
df -h | log_section "free disk space"
drives=($(df | awk '$1 ~ /^\/dev\// {print $1}' | xargs -n 1 basename))
block_devs=($(ls /sys/block/))
for d in "${drives[@]}"; do
for dev in "${block_devs[@]}"; do
#echo "D: [$d], DEV: [$dev]"
if [[ $d =~ $dev ]]; then
# this file (if exists) has 0 for SSD and 1 for HDD
if [[ "$(cat /sys/block/${dev}/queue/rotational 2>/dev/null)" == 0 ]] ; then
echo "${d} : SSD" >> ${tmp_loc}/is_ssd.txt
else
echo "${d} : NO SSD" >> ${tmp_loc}/is_ssd.txt
fi
fi
done
done
if [[ -f ${tmp_loc}/is_ssd.txt ]] ; then
cat ${tmp_loc}/is_ssd.txt | log_section "SSD"
rm ${tmp_loc}/is_ssd.txt
fi
cat ${log_file} | log_section "script log"
cat << MSG | tee /dev/fd/3
####################################################
rippled info has been gathered. Please copy the
contents of ${summary_out}
to a github gist at https://gist.github.com/
PLEASE REVIEW THIS FILE FOR ANY SENSITIVE DATA
BEFORE POSTING! We have tried our best to omit
any sensitive information from this file, but you
should verify before posting.
####################################################
MSG

View File

@@ -1,86 +0,0 @@
#!/bin/bash
if [[ $# -ne 1 || "$1" == "--help" || "$1" == "-h" ]]
then
name=$( basename $0 )
cat <<- USAGE
Usage: $name <username>
Where <username> is the Github username of the upstream repo. e.g. XRPLF
USAGE
exit 0
fi
# Create upstream remotes based on origin
user="$1"
# Minimal helper assumed here: the original excerpt calls _run without
# defining it. This version echoes each command before running it.
_run() {
  echo "+ $*" >&2
  "$@"
}
# Get the origin URL. Expect it be an SSH-style URL
origin=$( git remote get-url origin )
if [[ "${origin}" == "" ]]
then
echo Invalid origin remote >&2
exit 1
fi
# echo "Origin: ${origin}"
# Parse the origin
ifs_orig="${IFS}"
IFS=':' read remote originpath <<< "${origin}"
# echo "Remote: ${remote}, Originpath: ${originpath}"
IFS='@' read sshuser server <<< "${remote}"
# echo "SSHUser: ${sshuser}, Server: ${server}"
IFS='/' read originuser repo <<< "${originpath}"
# echo "Originuser: ${originuser}, Repo: ${repo}"
if [[ "${sshuser}" == "" || "${server}" == "" || "${originuser}" == ""
|| "${repo}" == "" ]]
then
echo "Can't parse origin URL: ${origin}" >&2
exit 1
fi
upstream="https://${server}/${user}/${repo}"
upstreampush="${remote}:${user}/${repo}"
upstreamgroup="upstream upstream-push"
current=$( git remote get-url upstream 2>/dev/null )
currentpush=$( git remote get-url upstream-push 2>/dev/null )
currentgroup=$( git config remotes.upstreams )
if [[ "${current}" == "${upstream}" ]]
then
echo "Upstream already set up correctly. Skip"
elif [[ -n "${current}" && "${current}" != "${upstream}" &&
"${current}" != "${upstreampush}" ]]
then
echo "Upstream already set up as: ${current}. Skip"
else
if [[ "${current}" == "${upstreampush}" ]]
then
echo "Upstream set to dangerous push URL. Update."
_run git remote rename upstream upstream-push || \
_run git remote remove upstream
currentpush=$( git remote get-url upstream-push 2>/dev/null )
fi
_run git remote add upstream "${upstream}"
fi
if [[ "${currentpush}" == "${upstreampush}" ]]
then
echo "upstream-push already set up correctly. Skip"
elif [[ -n "${currentpush}" && "${currentpush}" != "${upstreampush}" ]]
then
echo "upstream-push already set up as: ${currentpush}. Skip"
else
_run git remote add upstream-push "${upstreampush}"
fi
if [[ "${currentgroup}" == "${upstreamgroup}" ]]
then
echo "Upstreams group already set up correctly. Skip"
elif [[ -n "${currentgroup}" && "${currentgroup}" != "${upstreamgroup}" ]]
then
echo "Upstreams group already set up as: ${currentgroup}. Skip"
else
_run git config --add remotes.upstreams "${upstreamgroup}"
fi
_run git fetch --jobs=$(nproc) upstreams
exit 0

View File

@@ -1,69 +0,0 @@
#!/bin/bash
if [[ $# -lt 3 || "$1" == "--help" || "$1" = "-h" ]]
then
name=$( basename $0 )
cat <<- USAGE
Usage: $name workbranch base/branch user/branch [user/branch [...]]
* workbranch will be created locally from base/branch
* base/branch and user/branch may be specified as user:branch to allow
easy copying from Github PRs
* Remotes for each user must already be set up
USAGE
exit 0
fi
work="$1"
shift
branches=( $( echo "${@}" | sed "s/:/\//" ) )
base="${branches[0]}"
unset branches[0]
set -e
users=()
for b in "${branches[@]}"
do
users+=( $( echo $b | cut -d/ -f1 ) )
done
users=( $( printf '%s\n' "${users[@]}" | sort -u ) )
git fetch --multiple upstreams "${users[@]}"
git checkout -B "$work" --no-track "$base"
for b in "${branches[@]}"
do
git merge --squash "${b}"
git commit -S # Use the commit message provided on the PR
done
# Make sure the commits look right
git log --show-signature "$base..HEAD"
parts=( $( echo $base | sed "s/\// /" ) )
repo="${parts[0]}"
b="${parts[1]}"
push=$repo
if [[ "$push" == "upstream" ]]
then
push="upstream-push"
fi
if [[ "$repo" == "upstream" ]]
then
repo="upstreams"
fi
cat << PUSH
-------------------------------------------------------------------
This script will not push. Verify everything is correct, then push
to your repo, and create a PR if necessary. Once the PR is approved,
run:
git push $push HEAD:$b
git fetch $repo
-------------------------------------------------------------------
PUSH

View File

@@ -1,58 +0,0 @@
#!/bin/bash
if [[ $# -ne 3 || "$1" == "--help" || "$1" = "-h" ]]
then
name=$( basename $0 )
cat <<- USAGE
Usage: $name workbranch base/branch version
* workbranch will be created locally from base/branch. If it exists,
it will be reused, so make sure you don't overwrite any work.
* base/branch may be specified as user:branch to allow easy copying
from Github PRs.
USAGE
exit 0
fi
work="$1"
shift
base=$( echo "$1" | sed "s/:/\//" )
shift
version=$1
shift
set -e
git fetch upstreams
git checkout -B "${work}" --no-track "${base}"
push=$( git rev-parse --abbrev-ref --symbolic-full-name '@{push}' \
2>/dev/null ) || true
if [[ "${push}" != "" ]]
then
echo "Warning: ${push} may already exist."
fi
build=$( find -name BuildInfo.cpp )
sed 's/\(^.*versionString =\).*$/\1 "'${version}'"/' ${build} > version.cpp && \
diff "${build}" version.cpp && exit 1 || \
mv -vi version.cpp ${build}
git diff
git add ${build}
git commit -S -m "Set version to ${version}"
git log --oneline --first-parent ${base}^..
cat << PUSH
-------------------------------------------------------------------
This script will not push. Verify everything is correct, then push
to your repo, and create a PR as described in CONTRIBUTING.md.
-------------------------------------------------------------------
PUSH

1
bin/manifest Symbolic link
View File

@@ -0,0 +1 @@
python/Manifest.py

View File

@@ -1,218 +0,0 @@
#!/bin/bash
set -o errexit
marker_base=985c80fbc6131f3a8cedd0da7e8af98dfceb13c7
marker_commit=${1:-${marker_base}}
if [ $(git merge-base ${marker_commit} ${marker_base}) != ${marker_base} ]; then
echo "first marker commit not an ancestor: ${marker_commit}"
exit 1
fi
if [ $(git merge-base ${marker_commit} HEAD) != $(git rev-parse --verify ${marker_commit}) ]; then
echo "given marker commit not an ancestor: ${marker_commit}"
exit 1
fi
if [ -e Builds/CMake ]; then
echo move CMake
git mv Builds/CMake cmake
git add --update .
git commit -m 'Move CMake directory' --author 'Pretty Printer <cpp@ripple.com>'
fi
if [ -e src/ripple ]; then
echo move protocol buffers
mkdir -p include/xrpl
if [ -e src/ripple/proto ]; then
git mv src/ripple/proto include/xrpl
fi
extract_list() {
git show ${marker_commit}:Builds/CMake/RippledCore.cmake | \
awk "/END ${1}/ { p = 0 } p && /src\/ripple/; /BEGIN ${1}/ { p = 1 }" | \
sed -e 's#src/ripple/##' -e 's#[^a-z]\+$##'
}
move_files() {
oldroot="$1"; shift
newroot="$1"; shift
detail="$1"; shift
files=("$@")
for file in ${files[@]}; do
if [ ! -e ${oldroot}/${file} ]; then
continue
fi
dir=$(dirname ${file})
if [ $(basename ${dir}) == 'details' ]; then
dir=$(dirname ${dir})
fi
if [ $(basename ${dir}) == 'impl' ]; then
dir="$(dirname ${dir})/${detail}"
fi
mkdir -p ${newroot}/${dir}
git mv ${oldroot}/${file} ${newroot}/${dir}
done
}
echo move libxrpl headers
files=$(extract_list 'LIBXRPL HEADERS')
files+=(
basics/SlabAllocator.h
beast/asio/io_latency_probe.h
beast/container/aged_container.h
beast/container/aged_container_utility.h
beast/container/aged_map.h
beast/container/aged_multimap.h
beast/container/aged_multiset.h
beast/container/aged_set.h
beast/container/aged_unordered_map.h
beast/container/aged_unordered_multimap.h
beast/container/aged_unordered_multiset.h
beast/container/aged_unordered_set.h
beast/container/detail/aged_associative_container.h
beast/container/detail/aged_container_iterator.h
beast/container/detail/aged_ordered_container.h
beast/container/detail/aged_unordered_container.h
beast/container/detail/empty_base_optimization.h
beast/core/LockFreeStack.h
beast/insight/Collector.h
beast/insight/Counter.h
beast/insight/CounterImpl.h
beast/insight/Event.h
beast/insight/EventImpl.h
beast/insight/Gauge.h
beast/insight/GaugeImpl.h
beast/insight/Group.h
beast/insight/Groups.h
beast/insight/Hook.h
beast/insight/HookImpl.h
beast/insight/Insight.h
beast/insight/Meter.h
beast/insight/MeterImpl.h
beast/insight/NullCollector.h
beast/insight/StatsDCollector.h
beast/test/fail_counter.h
beast/test/fail_stream.h
beast/test/pipe_stream.h
beast/test/sig_wait.h
beast/test/string_iostream.h
beast/test/string_istream.h
beast/test/string_ostream.h
beast/test/test_allocator.h
beast/test/yield_to.h
beast/utility/hash_pair.h
beast/utility/maybe_const.h
beast/utility/temp_dir.h
# included by only json/impl/json_assert.h
json/json_errors.h
protocol/PayChan.h
protocol/RippleLedgerHash.h
protocol/messages.h
protocol/st.h
)
files+=(
basics/README.md
crypto/README.md
json/README.md
protocol/README.md
resource/README.md
)
move_files src/ripple include/xrpl detail ${files[@]}
echo move libxrpl sources
files=$(extract_list 'LIBXRPL SOURCES')
move_files src/ripple src/libxrpl "" ${files[@]}
echo check leftovers
dirs=$(cd include/xrpl; ls -d */)
dirs=$(cd src/ripple; ls -d ${dirs} 2>/dev/null || true)
files="$(cd src/ripple; find ${dirs} -type f)"
if [ -n "${files}" ]; then
echo "leftover files:"
echo ${files}
exit
fi
echo remove empty directories
empty_dirs="$(cd src/ripple; find ${dirs} -depth -type d)"
for dir in ${empty_dirs[@]}; do
if [ -e ${dir} ]; then
rmdir ${dir}
fi
done
echo move xrpld sources
files=$(
extract_list 'XRPLD SOURCES'
cd src/ripple
find * -regex '.*\.\(h\|ipp\|md\|pu\|uml\|png\)'
)
move_files src/ripple src/xrpld detail ${files[@]}
files="$(cd src/ripple; find . -type f)"
if [ -n "${files}" ]; then
echo "leftover files:"
echo ${files}
exit
fi
fi
rm -rf src/ripple
echo rename .hpp to .h
find include src -name '*.hpp' -exec bash -c 'f="{}"; git mv "${f}" "${f%hpp}h"' \;
echo move PerfLog.h
if [ -e include/xrpl/basics/PerfLog.h ]; then
git mv include/xrpl/basics/PerfLog.h src/xrpld/perflog
fi
# Make sure all protobuf includes have the correct prefix.
protobuf_replace='s:^#include\s*["<].*org/xrpl\([^">]\+\)[">]:#include <xrpl/proto/org/xrpl\1>:'
# Make sure first-party includes use angle brackets and .h extension.
ripple_replace='s:include\s*["<]ripple/\(.*\)\.h\(pp\)\?[">]:include <ripple/\1.h>:'
beast_replace='s:include\s*<beast/:include <xrpl/beast/:'
# Rename impl directories to detail.
impl_rename='s:\(<xrpl.*\)/impl\(/details\)\?/:\1/detail/:'
echo rewrite includes in libxrpl
find include/xrpl src/libxrpl -type f -exec sed -i \
-e "${protobuf_replace}" \
-e "${ripple_replace}" \
-e "${beast_replace}" \
-e 's:^#include <ripple/:#include <xrpl/:' \
-e "${impl_rename}" \
{} +
echo rewrite includes in xrpld
# # https://www.baeldung.com/linux/join-multiple-lines
libxrpl_dirs="$(cd include/xrpl; ls -d1 */ | sed 's:/$::')"
# libxrpl_dirs='a\nb\nc\n'
readarray -t libxrpl_dirs <<< "${libxrpl_dirs}"
# libxrpl_dirs=(a b c)
libxrpl_dirs=$(printf -v txt '%s\\|' "${libxrpl_dirs[@]}"; echo "${txt%\\|}")
# libxrpl_dirs='a\|b\|c'
find src/xrpld src/test -type f -exec sed -i \
-e "${protobuf_replace}" \
-e "${ripple_replace}" \
-e "${beast_replace}" \
-e "s:^#include <ripple/basics/PerfLog.h>:#include <xrpld/perflog/PerfLog.h>:" \
-e "s:^#include <ripple/\(${libxrpl_dirs}\)/:#include <xrpl/\1/:" \
-e 's:^#include <ripple/:#include <xrpld/:' \
-e "${impl_rename}" \
{} +
git commit -m 'Rearrange sources' --author 'Pretty Printer <cpp@ripple.com>'
find include src -type f \( -name '*.cpp' -o -name '*.h' -o -name '*.ipp' \) -exec clang-format-10 -i {} +
git add --update .
git commit -m 'Rewrite includes' --author 'Pretty Printer <cpp@ripple.com>'
./Builds/levelization/levelization.sh
git add --update .
git commit -m 'Recompute loops' --author 'Pretty Printer <cpp@ripple.com>'

24
bin/python/LedgerTool.py Executable file
View File

@@ -0,0 +1,24 @@
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import traceback
from ripple.ledger import Server
from ripple.ledger.commands import Cache, Info, Print
from ripple.ledger.Args import ARGS
from ripple.util import Log
from ripple.util.CommandList import CommandList
_COMMANDS = CommandList(Cache, Info, Print)
if __name__ == '__main__':
try:
server = Server.Server()
args = list(ARGS.command)
_COMMANDS.run_safe(args.pop(0), server, *args)
except Exception as e:
if ARGS.verbose:
print(traceback.format_exc(), file=sys.stderr)
Log.error(e)

7
bin/python/Manifest.py Executable file
View File

@@ -0,0 +1,7 @@
#!/usr/bin/env python
import sys
from ripple.util import Sign
result = Sign.run_command(sys.argv[1:])
sys.exit(0 if result else -1)

15
bin/python/README.md Normal file
View File

@@ -0,0 +1,15 @@
Unit Tests
==========
To run the Python unit tests, execute:
python -m unittest discover
from this directory.
To run Python unit tests from a particular file (such as
`ripple/util/test_Sign.py`), execute:
python -m unittest ripple.util.test_Sign
Add `-v` to run tests in verbose mode.

251
bin/python/decorator.py Normal file
View File

@@ -0,0 +1,251 @@
########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
__version__ = '3.4.0'
__all__ = ["decorator", "FunctionMaker", "contextmanager"]
import sys, re, inspect
if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
class getfullargspec(object):
"A quick and dirty replacement for getfullargspec for Python 2.X"
def __init__(self, f):
self.args, self.varargs, self.varkw, self.defaults = \
inspect.getargspec(f)
self.kwonlyargs = []
self.kwonlydefaults = None
def __iter__(self):
yield self.args
yield self.varargs
yield self.varkw
yield self.defaults
def get_init(cls):
return cls.__init__.im_func
DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
if sys.version < '3': # easy way
self.shortsignature = self.signature = \
inspect.formatargspec(
formatvalue=lambda val: "", *argspec)[1:-1]
else: # Python 3 way
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
# check existence of required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.func_defaults = getattr(self, 'defaults', ())
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
func.__annotations__ = getattr(self, 'annotations', None)
callermodule = sys._getframe(3).f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.match(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline just for safety
src += '\n' # this is needed in old versions of Python
try:
code = compile(src, '<string>', 'single')
# print >> sys.stderr, 'Compiling %s' % src
exec code in evaldict
except:
print >> sys.stderr, 'Error in generated code:'
print >> sys.stderr, src
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an attribute
__source__ is added to the result. The attributes attrs are added,
if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
signature = rest[:-1] #strip a right parens
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
return self.make('def %(name)s(%(signature)s):\n' + ibody,
evaldict, addsource, **attrs)
def decorator(caller, func=None):
"""
decorator(caller) converts a caller function into a decorator;
decorator(caller, func) decorates a function using a caller.
"""
if func is not None: # returns a decorated function
evaldict = func.func_globals.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
return FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, undecorated=func, __wrapped__=func)
else: # returns a decorator
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
fun = getfullargspec(callerfunc).args[1] # second arg
elif inspect.isfunction(caller):
name = '_lambda_' if caller.__name__ == '<lambda>' \
else caller.__name__
callerfunc = caller
doc = caller.__doc__
fun = getfullargspec(callerfunc).args[0] # first arg
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.im_func
doc = caller.__call__.__doc__
fun = getfullargspec(callerfunc).args[1] # second arg
evaldict = callerfunc.func_globals.copy()
evaldict['_call_'] = caller
evaldict['decorator'] = decorator
return FunctionMaker.create(
'%s(%s)' % (name, fun),
'return decorator(_call_, %s)' % fun,
evaldict, undecorated=caller, __wrapped__=caller,
doc=doc, module=caller.__module__)
######################### contextmanager ########################
def __call__(self, func):
'Context manager decorator'
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
ContextManager = type(
'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager
def __init__(self, f, *a, **k):
return GeneratorContextManager.__init__(self, f(*a, **k))
ContextManager = type(
'ContextManager', (GeneratorContextManager,),
dict(__call__=__call__, __init__=__init__))
contextmanager = decorator(ContextManager)

View File

@@ -0,0 +1,14 @@
__all__ = ["curves", "der", "ecdsa", "ellipticcurve", "keys", "numbertheory",
"test_pyecdsa", "util", "six"]
from .keys import SigningKey, VerifyingKey, BadSignatureError, BadDigestError
from .curves import NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1
_hush_pyflakes = [SigningKey, VerifyingKey, BadSignatureError, BadDigestError,
NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1]
del _hush_pyflakes
# This code comes from http://github.com/warner/python-ecdsa
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions

View File

@@ -0,0 +1,183 @@
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by GitHub's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.12 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = " (HEAD, master)"
git_full = "e7a6daff51221b8edd888cff404596ef90432869"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = "python-ecdsa-"
parentdir_prefix = "ecdsa-"
versionfile_source = "ecdsa/_version.py"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)

bin/python/ecdsa/curves.py

@@ -0,0 +1,53 @@
from __future__ import division
from . import der, ecdsa
class UnknownCurveError(Exception):
pass
def orderlen(order):
return (1+len("%x"%order))//2 # bytes
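# A quick sketch of the byte-length rule above (helper not part of the
# module): a 192-bit order packs into 24 bytes, and one extra bit tips the
# result to the next whole byte.
def _example_orderlen():
    assert orderlen(255) == 1
    assert orderlen(256) == 2
    assert orderlen(1 << 191) == 24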
# the NIST curves
class Curve:
def __init__(self, name, openssl_name,
curve, generator, oid):
self.name = name
self.openssl_name = openssl_name # maybe None
self.curve = curve
self.generator = generator
self.order = generator.order()
self.baselen = orderlen(self.order)
self.verifying_key_length = 2*self.baselen
self.signature_length = 2*self.baselen
self.oid = oid
self.encoded_oid = der.encode_oid(*oid)
NIST192p = Curve("NIST192p", "prime192v1",
ecdsa.curve_192, ecdsa.generator_192,
(1, 2, 840, 10045, 3, 1, 1))
NIST224p = Curve("NIST224p", "secp224r1",
ecdsa.curve_224, ecdsa.generator_224,
(1, 3, 132, 0, 33))
NIST256p = Curve("NIST256p", "prime256v1",
ecdsa.curve_256, ecdsa.generator_256,
(1, 2, 840, 10045, 3, 1, 7))
NIST384p = Curve("NIST384p", "secp384r1",
ecdsa.curve_384, ecdsa.generator_384,
(1, 3, 132, 0, 34))
NIST521p = Curve("NIST521p", "secp521r1",
ecdsa.curve_521, ecdsa.generator_521,
(1, 3, 132, 0, 35))
SECP256k1 = Curve("SECP256k1", "secp256k1",
ecdsa.curve_secp256k1, ecdsa.generator_secp256k1,
(1, 3, 132, 0, 10))
curves = [NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1]
def find_curve(oid_curve):
for c in curves:
if c.oid == oid_curve:
return c
    raise UnknownCurveError("I don't know about the curve with oid %s. "
                            "I only know about these: %s" %
(oid_curve, [c.name for c in curves]))
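# Illustration (helper not part of the module): find_curve maps a DER OID
# back to the Curve object registered above.
def _example_find_curve():
    assert find_curve((1, 3, 132, 0, 10)) is SECP256k1
    assert find_curve(NIST256p.oid) is NIST256p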

bin/python/ecdsa/der.py

@@ -0,0 +1,199 @@
from __future__ import division
import binascii
import base64
from .six import int2byte, b, integer_types, text_type
class UnexpectedDER(Exception):
pass
def encode_constructed(tag, value):
return int2byte(0xa0+tag) + encode_length(len(value)) + value
def encode_integer(r):
assert r >= 0 # can't support negative numbers yet
h = ("%x" % r).encode()
if len(h) % 2:
h = b("0") + h
s = binascii.unhexlify(h)
num = s[0] if isinstance(s[0], integer_types) else ord(s[0])
if num <= 0x7f:
return b("\x02") + int2byte(len(s)) + s
else:
# DER integers are two's complement, so if the first byte is
# 0x80-0xff then we need an extra 0x00 byte to prevent it from
# looking negative.
return b("\x02") + int2byte(len(s)+1) + b("\x00") + s
def encode_bitstring(s):
return b("\x03") + encode_length(len(s)) + s
def encode_octet_string(s):
return b("\x04") + encode_length(len(s)) + s
def encode_oid(first, second, *pieces):
assert first <= 2
assert second <= 39
encoded_pieces = [int2byte(40*first+second)] + [encode_number(p)
for p in pieces]
body = b('').join(encoded_pieces)
return b('\x06') + encode_length(len(body)) + body
def encode_sequence(*encoded_pieces):
total_len = sum([len(p) for p in encoded_pieces])
return b('\x30') + encode_length(total_len) + b('').join(encoded_pieces)
def encode_number(n):
b128_digits = []
while n:
b128_digits.insert(0, (n & 0x7f) | 0x80)
n = n >> 7
if not b128_digits:
b128_digits.append(0)
b128_digits[-1] &= 0x7f
return b('').join([int2byte(d) for d in b128_digits])
def remove_constructed(string):
s0 = string[0] if isinstance(string[0], integer_types) else ord(string[0])
if (s0 & 0xe0) != 0xa0:
raise UnexpectedDER("wanted constructed tag (0xa0-0xbf), got 0x%02x"
% s0)
tag = s0 & 0x1f
length, llen = read_length(string[1:])
body = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
return tag, body, rest
def remove_sequence(string):
if not string.startswith(b("\x30")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted sequence (0x30), got 0x%02x" % n)
length, lengthlength = read_length(string[1:])
endseq = 1+lengthlength+length
return string[1+lengthlength:endseq], string[endseq:]
def remove_octet_string(string):
if not string.startswith(b("\x04")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted octetstring (0x04), got 0x%02x" % n)
length, llen = read_length(string[1:])
body = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
return body, rest
def remove_object(string):
if not string.startswith(b("\x06")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted object (0x06), got 0x%02x" % n)
length, lengthlength = read_length(string[1:])
body = string[1+lengthlength:1+lengthlength+length]
rest = string[1+lengthlength+length:]
numbers = []
while body:
n, ll = read_number(body)
numbers.append(n)
body = body[ll:]
n0 = numbers.pop(0)
first = n0//40
second = n0-(40*first)
numbers.insert(0, first)
numbers.insert(1, second)
return tuple(numbers), rest
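# Round-trip sketch for OIDs (helper not part of the module); the tuple is
# the prime256v1 identifier used elsewhere in this package:
def _example_oid_roundtrip():
    oid = (1, 2, 840, 10045, 3, 1, 7)
    assert remove_object(encode_oid(*oid)) == (oid, b(""))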
def remove_integer(string):
if not string.startswith(b("\x02")):
n = string[0] if isinstance(string[0], integer_types) else ord(string[0])
raise UnexpectedDER("wanted integer (0x02), got 0x%02x" % n)
length, llen = read_length(string[1:])
numberbytes = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
nbytes = numberbytes[0] if isinstance(numberbytes[0], integer_types) else ord(numberbytes[0])
assert nbytes < 0x80 # can't support negative numbers yet
return int(binascii.hexlify(numberbytes), 16), rest
def read_number(string):
number = 0
llen = 0
# base-128 big endian, with b7 set in all but the last byte
while True:
        if llen >= len(string):
raise UnexpectedDER("ran out of length bytes")
number = number << 7
d = string[llen] if isinstance(string[llen], integer_types) else ord(string[llen])
number += (d & 0x7f)
llen += 1
if not d & 0x80:
break
return number, llen
def encode_length(l):
assert l >= 0
if l < 0x80:
return int2byte(l)
s = ("%x" % l).encode()
if len(s)%2:
s = b("0")+s
s = binascii.unhexlify(s)
llen = len(s)
return int2byte(0x80|llen) + s
def read_length(string):
num = string[0] if isinstance(string[0], integer_types) else ord(string[0])
if not (num & 0x80):
# short form
return (num & 0x7f), 1
# else long-form: b0&0x7f is number of additional base256 length bytes,
# big-endian
llen = num & 0x7f
if llen > len(string)-1:
raise UnexpectedDER("ran out of length bytes")
return int(binascii.hexlify(string[1:1+llen]), 16), 1+llen
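# Length round-trip sketch (helper not part of the module): short form for
# lengths below 0x80, long form with a byte-count prefix otherwise.
def _example_length_roundtrip():
    assert encode_length(0x7f) == b("\x7f")
    assert encode_length(500) == b("\x82\x01\xf4")
    assert read_length(b("\x82\x01\xf4")) == (500, 3)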
def remove_bitstring(string):
num = string[0] if isinstance(string[0], integer_types) else ord(string[0])
if not string.startswith(b("\x03")):
raise UnexpectedDER("wanted bitstring (0x03), got 0x%02x" % num)
length, llen = read_length(string[1:])
body = string[1+llen:1+llen+length]
rest = string[1+llen+length:]
return body, rest
# SEQUENCE([1, STRING(secexp), cont[0], OBJECT(curvename), cont[1], BITSTRING])
# signatures: (from RFC3279)
# ansi-X9-62 OBJECT IDENTIFIER ::= {
# iso(1) member-body(2) us(840) 10045 }
#
# id-ecSigType OBJECT IDENTIFIER ::= {
# ansi-X9-62 signatures(4) }
# ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
# id-ecSigType 1 }
## so 1,2,840,10045,4,1
## so 0x42, .. ..
# Ecdsa-Sig-Value ::= SEQUENCE {
# r INTEGER,
# s INTEGER }
# id-public-key-type OBJECT IDENTIFIER ::= { ansi-X9.62 2 }
#
# id-ecPublicKey OBJECT IDENTIFIER ::= { id-publicKeyType 1 }
# I think the secp224r1 identifier is (t=06,l=05,v=2b81040021)
# secp224r1 OBJECT IDENTIFIER ::= {
# iso(1) identified-organization(3) certicom(132) curve(0) 33 }
# and the secp384r1 is (t=06,l=05,v=2b81040022)
# secp384r1 OBJECT IDENTIFIER ::= {
# iso(1) identified-organization(3) certicom(132) curve(0) 34 }
def unpem(pem):
if isinstance(pem, text_type):
pem = pem.encode()
d = b("").join([l.strip() for l in pem.split(b("\n"))
if l and not l.startswith(b("-----"))])
return base64.b64decode(d)
def topem(der, name):
b64 = base64.b64encode(der)
lines = [("-----BEGIN %s-----\n" % name).encode()]
lines.extend([b64[start:start+64]+b("\n")
for start in range(0, len(b64), 64)])
lines.append(("-----END %s-----\n" % name).encode())
return b("").join(lines)

bin/python/ecdsa/ecdsa.py

@@ -0,0 +1,576 @@
#! /usr/bin/env python
"""
Implementation of Elliptic-Curve Digital Signatures.
Classes and methods for elliptic-curve signatures:
private keys, public keys, signatures,
NIST prime-modulus curves with modulus lengths of
192, 224, 256, 384, and 521 bits.
Example:
# (In real-life applications, you would probably want to
# protect against defects in SystemRandom.)
from random import SystemRandom
randrange = SystemRandom().randrange
# Generate a public/private key pair using the NIST Curve P-192:
g = generator_192
n = g.order()
secret = randrange( 1, n )
pubkey = Public_key( g, g * secret )
privkey = Private_key( pubkey, secret )
# Signing a hash value:
hash = randrange( 1, n )
signature = privkey.sign( hash, randrange( 1, n ) )
# Verifying a signature for a hash value:
if pubkey.verifies( hash, signature ):
print_("Demo verification succeeded.")
else:
print_("*** Demo verification failed.")
# Verification fails if the hash value is modified:
if pubkey.verifies( hash-1, signature ):
print_("**** Demo verification failed to reject tampered hash.")
else:
print_("Demo verification correctly rejected tampered hash.")
Version of 2009.05.16.
Revision history:
2005.12.31 - Initial version.
2008.11.25 - Substantial revisions introducing new classes.
2009.05.16 - Warn against using random.randrange in real applications.
2009.05.17 - Use random.SystemRandom by default.
Written in 2005 by Peter Pearson and placed in the public domain.
"""
from .six import int2byte, b, print_
from . import ellipticcurve
from . import numbertheory
import random
class Signature( object ):
"""ECDSA signature.
"""
def __init__( self, r, s ):
self.r = r
self.s = s
class Public_key( object ):
"""Public key for ECDSA.
"""
def __init__( self, generator, point ):
"""generator is the Point that generates the group,
point is the Point that defines the public key.
"""
self.curve = generator.curve()
self.generator = generator
self.point = point
n = generator.order()
if not n:
raise RuntimeError("Generator point must have order.")
        if not n * point == ellipticcurve.INFINITY:
            raise RuntimeError("Point order is bad.")
        if point.x() < 0 or n <= point.x() or point.y() < 0 or n <= point.y():
            raise RuntimeError("Public key point has x or y out of range.")
def verifies( self, hash, signature ):
"""Verify that signature is a valid signature of hash.
Return True if the signature is valid.
"""
# From X9.62 J.3.1.
G = self.generator
n = G.order()
r = signature.r
s = signature.s
if r < 1 or r > n-1: return False
if s < 1 or s > n-1: return False
c = numbertheory.inverse_mod( s, n )
u1 = ( hash * c ) % n
u2 = ( r * c ) % n
xy = u1 * G + u2 * self.point
v = xy.x() % n
return v == r
class Private_key( object ):
"""Private key for ECDSA.
"""
def __init__( self, public_key, secret_multiplier ):
"""public_key is of class Public_key;
secret_multiplier is a large integer.
"""
self.public_key = public_key
self.secret_multiplier = secret_multiplier
def sign( self, hash, random_k ):
"""Return a signature for the provided hash, using the provided
random nonce. It is absolutely vital that random_k be an unpredictable
number in the range [1, self.public_key.point.order()-1]. If
an attacker can guess random_k, he can compute our private key from a
single signature. Also, if an attacker knows a few high-order
bits (or a few low-order bits) of random_k, he can compute our private
key from many signatures. The generation of nonces with adequate
cryptographic strength is very difficult and far beyond the scope
of this comment.
May raise RuntimeError, in which case retrying with a new
random value k is in order.
"""
G = self.public_key.generator
n = G.order()
k = random_k % n
p1 = k * G
r = p1.x()
if r == 0: raise RuntimeError("amazingly unlucky random number r")
s = ( numbertheory.inverse_mod( k, n ) * \
( hash + ( self.secret_multiplier * r ) % n ) ) % n
if s == 0: raise RuntimeError("amazingly unlucky random number s")
return Signature( r, s )
def int_to_string( x ):
"""Convert integer x into a string of bytes, as per X9.62."""
assert x >= 0
if x == 0: return b('\0')
result = []
while x:
ordinal = x & 0xFF
result.append(int2byte(ordinal))
x >>= 8
result.reverse()
return b('').join(result)
def string_to_int( s ):
"""Convert a string of bytes into an integer, as per X9.62."""
result = 0
for c in s:
if not isinstance(c, int): c = ord( c )
result = 256 * result + c
return result
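# Round-trip sketch for the X9.62 conversions above (helper not part of
# the module):
def _example_x962_strings():
    assert int_to_string(258) == b('\x01\x02')
    assert string_to_int(b('\x01\x02')) == 258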
def digest_integer( m ):
"""Convert an integer into a string of bytes, compute
its SHA-1 hash, and convert the result to an integer."""
#
# I don't expect this function to be used much. I wrote
# it in order to be able to duplicate the examples
# in ECDSAVS.
#
from hashlib import sha1
return string_to_int( sha1( int_to_string( m ) ).digest() )
def point_is_valid( generator, x, y ):
"""Is (x,y) a valid public key based on the specified generator?"""
# These are the tests specified in X9.62.
n = generator.order()
curve = generator.curve()
if x < 0 or n <= x or y < 0 or n <= y:
return False
if not curve.contains_point( x, y ):
return False
if not n*ellipticcurve.Point( curve, x, y ) == \
ellipticcurve.INFINITY:
return False
return True
# NIST Curve P-192:
_p = 6277101735386680763835789423207666416083908700390324961279
_r = 6277101735386680763835789423176059013767194773182842284081
# s = 0x3045ae6fc8422f64ed579528d38120eae12196d5L
# c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65L
_b = 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
_Gx = 0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012
_Gy = 0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811
curve_192 = ellipticcurve.CurveFp( _p, -3, _b )
generator_192 = ellipticcurve.Point( curve_192, _Gx, _Gy, _r )
# NIST Curve P-224:
_p = 26959946667150639794667015087019630673557916260026308143510066298881
_r = 26959946667150639794667015087019625940457807714424391721682722368061
# s = 0xbd71344799d5c7fcdc45b59fa3b9ab8f6a948bc5L
# c = 0x5b056c7e11dd68f40469ee7f3c7a7d74f7d121116506d031218291fbL
_b = 0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4
_Gx = 0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21
_Gy = 0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34
curve_224 = ellipticcurve.CurveFp( _p, -3, _b )
generator_224 = ellipticcurve.Point( curve_224, _Gx, _Gy, _r )
# NIST Curve P-256:
_p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
_r = 115792089210356248762697446949407573529996955224135760342422259061068512044369
# s = 0xc49d360886e704936a6678e1139d26b7819f7e90L
# c = 0x7efba1662985be9403cb055c75d4f7e0ce8d84a9c5114abcaf3177680104fa0dL
_b = 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b
_Gx = 0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296
_Gy = 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5
curve_256 = ellipticcurve.CurveFp( _p, -3, _b )
generator_256 = ellipticcurve.Point( curve_256, _Gx, _Gy, _r )
# NIST Curve P-384:
_p = 39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319
_r = 39402006196394479212279040100143613805079739270465446667946905279627659399113263569398956308152294913554433653942643
# s = 0xa335926aa319a27a1d00896a6773a4827acdac73L
# c = 0x79d1e655f868f02fff48dcdee14151ddb80643c1406d0ca10dfe6fc52009540a495e8042ea5f744f6e184667cc722483L
_b = 0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef
_Gx = 0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7
_Gy = 0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f
curve_384 = ellipticcurve.CurveFp( _p, -3, _b )
generator_384 = ellipticcurve.Point( curve_384, _Gx, _Gy, _r )
# NIST Curve P-521:
_p = 6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151
_r = 6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449
# s = 0xd09e8800291cb85396cc6717393284aaa0da64baL
# c = 0x0b48bfa5f420a34949539d2bdfc264eeeeb077688e44fbf0ad8f6d0edb37bd6b533281000518e19f1b9ffbe0fe9ed8a3c2200b8f875e523868c70c1e5bf55bad637L
_b = 0x051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00
_Gx = 0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66
_Gy = 0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650
curve_521 = ellipticcurve.CurveFp( _p, -3, _b )
generator_521 = ellipticcurve.Point( curve_521, _Gx, _Gy, _r )
# Certicom secp256-k1
_a = 0x0000000000000000000000000000000000000000000000000000000000000000
_b = 0x0000000000000000000000000000000000000000000000000000000000000007
_p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f
_Gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
_Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
_r = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
curve_secp256k1 = ellipticcurve.CurveFp( _p, _a, _b)
generator_secp256k1 = ellipticcurve.Point( curve_secp256k1, _Gx, _Gy, _r)
def __main__():
class TestFailure(Exception): pass
def test_point_validity( generator, x, y, expected ):
"""generator defines the curve; is (x,y) a point on
this curve? "expected" is True if the right answer is Yes."""
if point_is_valid( generator, x, y ) == expected:
print_("Point validity tested as expected.")
else:
raise TestFailure("*** Point validity test gave wrong result.")
def test_signature_validity( Msg, Qx, Qy, R, S, expected ):
"""Msg = message, Qx and Qy represent the base point on
elliptic curve c192, R and S are the signature, and
"expected" is True iff the signature is expected to be valid."""
pubk = Public_key( generator_192,
ellipticcurve.Point( curve_192, Qx, Qy ) )
got = pubk.verifies( digest_integer( Msg ), Signature( R, S ) )
if got == expected:
print_("Signature tested as expected: got %s, expected %s." % \
( got, expected ))
else:
raise TestFailure("*** Signature test failed: got %s, expected %s." % \
( got, expected ))
print_("NIST Curve P-192:")
p192 = generator_192
# From X9.62:
d = 651056770906015076056810763456358567190100156695615665659
Q = d * p192
if Q.x() != 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5:
raise TestFailure("*** p192 * d came out wrong.")
else:
print_("p192 * d came out right.")
k = 6140507067065001063065065565667405560006161556565665656654
R = k * p192
if R.x() != 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD \
or R.y() != 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835:
raise TestFailure("*** k * p192 came out wrong.")
else:
print_("k * p192 came out right.")
u1 = 2563697409189434185194736134579731015366492496392189760599
u2 = 6266643813348617967186477710235785849136406323338782220568
temp = u1 * p192 + u2 * Q
if temp.x() != 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD \
or temp.y() != 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835:
raise TestFailure("*** u1 * p192 + u2 * Q came out wrong.")
else:
print_("u1 * p192 + u2 * Q came out right.")
e = 968236873715988614170569073515315707566766479517
pubk = Public_key( generator_192, generator_192 * d )
privk = Private_key( pubk, d )
sig = privk.sign( e, k )
r, s = sig.r, sig.s
if r != 3342403536405981729393488334694600415596881826869351677613 \
or s != 5735822328888155254683894997897571951568553642892029982342:
raise TestFailure("*** r or s came out wrong.")
else:
print_("r and s came out right.")
valid = pubk.verifies( e, sig )
if valid: print_("Signature verified OK.")
else: raise TestFailure("*** Signature failed verification.")
valid = pubk.verifies( e-1, sig )
if not valid: print_("Forgery was correctly rejected.")
else: raise TestFailure("*** Forgery was erroneously accepted.")
print_("Testing point validity, as per ECDSAVS.pdf B.2.2:")
test_point_validity( \
p192, \
0xcd6d0f029a023e9aaca429615b8f577abee685d8257cc83a, \
0x00019c410987680e9fb6c0b6ecc01d9a2647c8bae27721bacdfc, \
False )
test_point_validity(
p192, \
0x00017f2fce203639e9eaf9fb50b81fc32776b30e3b02af16c73b, \
0x95da95c5e72dd48e229d4748d4eee658a9a54111b23b2adb, \
False )
test_point_validity(
p192, \
0x4f77f8bc7fccbadd5760f4938746d5f253ee2168c1cf2792, \
0x000147156ff824d131629739817edb197717c41aab5c2a70f0f6, \
False )
test_point_validity(
p192, \
0xc58d61f88d905293bcd4cd0080bcb1b7f811f2ffa41979f6, \
0x8804dc7a7c4c7f8b5d437f5156f3312ca7d6de8a0e11867f, \
True )
test_point_validity(
p192, \
0xcdf56c1aa3d8afc53c521adf3ffb96734a6a630a4a5b5a70, \
0x97c1c44a5fb229007b5ec5d25f7413d170068ffd023caa4e, \
True )
test_point_validity(
p192, \
0x89009c0dc361c81e99280c8e91df578df88cdf4b0cdedced, \
0x27be44a529b7513e727251f128b34262a0fd4d8ec82377b9, \
True )
test_point_validity(
p192, \
0x6a223d00bd22c52833409a163e057e5b5da1def2a197dd15, \
0x7b482604199367f1f303f9ef627f922f97023e90eae08abf, \
True )
test_point_validity(
p192, \
0x6dccbde75c0948c98dab32ea0bc59fe125cf0fb1a3798eda, \
0x0001171a3e0fa60cf3096f4e116b556198de430e1fbd330c8835, \
False )
test_point_validity(
p192, \
0xd266b39e1f491fc4acbbbc7d098430931cfa66d55015af12, \
0x193782eb909e391a3148b7764e6b234aa94e48d30a16dbb2, \
False )
test_point_validity(
p192, \
0x9d6ddbcd439baa0c6b80a654091680e462a7d1d3f1ffeb43, \
0x6ad8efc4d133ccf167c44eb4691c80abffb9f82b932b8caa, \
False )
test_point_validity(
p192, \
0x146479d944e6bda87e5b35818aa666a4c998a71f4e95edbc, \
0xa86d6fe62bc8fbd88139693f842635f687f132255858e7f6, \
False )
test_point_validity(
p192, \
0xe594d4a598046f3598243f50fd2c7bd7d380edb055802253, \
0x509014c0c4d6b536e3ca750ec09066af39b4c8616a53a923, \
False )
print_("Trying signature-verification tests from ECDSAVS.pdf B.2.4:")
print_("P-192:")
Msg = 0x84ce72aa8699df436059f052ac51b6398d2511e49631bcb7e71f89c499b9ee425dfbc13a5f6d408471b054f2655617cbbaf7937b7c80cd8865cf02c8487d30d2b0fbd8b2c4e102e16d828374bbc47b93852f212d5043c3ea720f086178ff798cc4f63f787b9c2e419efa033e7644ea7936f54462dc21a6c4580725f7f0e7d158
Qx = 0xd9dbfb332aa8e5ff091e8ce535857c37c73f6250ffb2e7ac
Qy = 0x282102e364feded3ad15ddf968f88d8321aa268dd483ebc4
R = 0x64dca58a20787c488d11d6dd96313f1b766f2d8efe122916
S = 0x1ecba28141e84ab4ecad92f56720e2cc83eb3d22dec72479
test_signature_validity( Msg, Qx, Qy, R, S, True )
Msg = 0x94bb5bacd5f8ea765810024db87f4224ad71362a3c28284b2b9f39fab86db12e8beb94aae899768229be8fdb6c4f12f28912bb604703a79ccff769c1607f5a91450f30ba0460d359d9126cbd6296be6d9c4bb96c0ee74cbb44197c207f6db326ab6f5a659113a9034e54be7b041ced9dcf6458d7fb9cbfb2744d999f7dfd63f4
Qx = 0x3e53ef8d3112af3285c0e74842090712cd324832d4277ae7
Qy = 0xcc75f8952d30aec2cbb719fc6aa9934590b5d0ff5a83adb7
R = 0x8285261607283ba18f335026130bab31840dcfd9c3e555af
S = 0x356d89e1b04541afc9704a45e9c535ce4a50929e33d7e06c
test_signature_validity( Msg, Qx, Qy, R, S, True )
Msg = 0xf6227a8eeb34afed1621dcc89a91d72ea212cb2f476839d9b4243c66877911b37b4ad6f4448792a7bbba76c63bdd63414b6facab7dc71c3396a73bd7ee14cdd41a659c61c99b779cecf07bc51ab391aa3252386242b9853ea7da67fd768d303f1b9b513d401565b6f1eb722dfdb96b519fe4f9bd5de67ae131e64b40e78c42dd
Qx = 0x16335dbe95f8e8254a4e04575d736befb258b8657f773cb7
Qy = 0x421b13379c59bc9dce38a1099ca79bbd06d647c7f6242336
R = 0x4141bd5d64ea36c5b0bd21ef28c02da216ed9d04522b1e91
S = 0x159a6aa852bcc579e821b7bb0994c0861fb08280c38daa09
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0x16b5f93afd0d02246f662761ed8e0dd9504681ed02a253006eb36736b563097ba39f81c8e1bce7a16c1339e345efabbc6baa3efb0612948ae51103382a8ee8bc448e3ef71e9f6f7a9676694831d7f5dd0db5446f179bcb737d4a526367a447bfe2c857521c7f40b6d7d7e01a180d92431fb0bbd29c04a0c420a57b3ed26ccd8a
Qx = 0xfd14cdf1607f5efb7b1793037b15bdf4baa6f7c16341ab0b
Qy = 0x83fa0795cc6c4795b9016dac928fd6bac32f3229a96312c4
R = 0x8dfdb832951e0167c5d762a473c0416c5c15bc1195667dc1
S = 0x1720288a2dc13fa1ec78f763f8fe2ff7354a7e6fdde44520
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0x08a2024b61b79d260e3bb43ef15659aec89e5b560199bc82cf7c65c77d39192e03b9a895d766655105edd9188242b91fbde4167f7862d4ddd61e5d4ab55196683d4f13ceb90d87aea6e07eb50a874e33086c4a7cb0273a8e1c4408f4b846bceae1ebaac1b2b2ea851a9b09de322efe34cebe601653efd6ddc876ce8c2f2072fb
Qx = 0x674f941dc1a1f8b763c9334d726172d527b90ca324db8828
Qy = 0x65adfa32e8b236cb33a3e84cf59bfb9417ae7e8ede57a7ff
R = 0x9508b9fdd7daf0d8126f9e2bc5a35e4c6d800b5b804d7796
S = 0x36f2bf6b21b987c77b53bb801b3435a577e3d493744bfab0
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0x1843aba74b0789d4ac6b0b8923848023a644a7b70afa23b1191829bbe4397ce15b629bf21a8838298653ed0c19222b95fa4f7390d1b4c844d96e645537e0aae98afb5c0ac3bd0e4c37f8daaff25556c64e98c319c52687c904c4de7240a1cc55cd9756b7edaef184e6e23b385726e9ffcba8001b8f574987c1a3fedaaa83ca6d
Qx = 0x10ecca1aad7220b56a62008b35170bfd5e35885c4014a19f
Qy = 0x04eb61984c6c12ade3bc47f3c629ece7aa0a033b9948d686
R = 0x82bfa4e82c0dfe9274169b86694e76ce993fd83b5c60f325
S = 0xa97685676c59a65dbde002fe9d613431fb183e8006d05633
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0x5a478f4084ddd1a7fea038aa9732a822106385797d02311aeef4d0264f824f698df7a48cfb6b578cf3da416bc0799425bb491be5b5ecc37995b85b03420a98f2c4dc5c31a69a379e9e322fbe706bbcaf0f77175e05cbb4fa162e0da82010a278461e3e974d137bc746d1880d6eb02aa95216014b37480d84b87f717bb13f76e1
Qx = 0x6636653cb5b894ca65c448277b29da3ad101c4c2300f7c04
Qy = 0xfdf1cbb3fc3fd6a4f890b59e554544175fa77dbdbeb656c1
R = 0xeac2ddecddfb79931a9c3d49c08de0645c783a24cb365e1c
S = 0x3549fee3cfa7e5f93bc47d92d8ba100e881a2a93c22f8d50
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0xc598774259a058fa65212ac57eaa4f52240e629ef4c310722088292d1d4af6c39b49ce06ba77e4247b20637174d0bd67c9723feb57b5ead232b47ea452d5d7a089f17c00b8b6767e434a5e16c231ba0efa718a340bf41d67ea2d295812ff1b9277daacb8bc27b50ea5e6443bcf95ef4e9f5468fe78485236313d53d1c68f6ba2
Qx = 0xa82bd718d01d354001148cd5f69b9ebf38ff6f21898f8aaa
Qy = 0xe67ceede07fc2ebfafd62462a51e4b6c6b3d5b537b7caf3e
R = 0x4d292486c620c3de20856e57d3bb72fcde4a73ad26376955
S = 0xa85289591a6081d5728825520e62ff1c64f94235c04c7f95
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0xca98ed9db081a07b7557f24ced6c7b9891269a95d2026747add9e9eb80638a961cf9c71a1b9f2c29744180bd4c3d3db60f2243c5c0b7cc8a8d40a3f9a7fc910250f2187136ee6413ffc67f1a25e1c4c204fa9635312252ac0e0481d89b6d53808f0c496ba87631803f6c572c1f61fa049737fdacce4adff757afed4f05beb658
Qx = 0x7d3b016b57758b160c4fca73d48df07ae3b6b30225126c2f
Qy = 0x4af3790d9775742bde46f8da876711be1b65244b2b39e7ec
R = 0x95f778f5f656511a5ab49a5d69ddd0929563c29cbc3a9e62
S = 0x75c87fc358c251b4c83d2dd979faad496b539f9f2ee7a289
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0x31dd9a54c8338bea06b87eca813d555ad1850fac9742ef0bbe40dad400e10288acc9c11ea7dac79eb16378ebea9490e09536099f1b993e2653cd50240014c90a9c987f64545abc6a536b9bd2435eb5e911fdfde2f13be96ea36ad38df4ae9ea387b29cced599af777338af2794820c9cce43b51d2112380a35802ab7e396c97a
Qx = 0x9362f28c4ef96453d8a2f849f21e881cd7566887da8beb4a
Qy = 0xe64d26d8d74c48a024ae85d982ee74cd16046f4ee5333905
R = 0xf3923476a296c88287e8de914b0b324ad5a963319a4fe73b
S = 0xf0baeed7624ed00d15244d8ba2aede085517dbdec8ac65f5
test_signature_validity( Msg, Qx, Qy, R, S, True )
Msg = 0xb2b94e4432267c92f9fdb9dc6040c95ffa477652761290d3c7de312283f6450d89cc4aabe748554dfb6056b2d8e99c7aeaad9cdddebdee9dbc099839562d9064e68e7bb5f3a6bba0749ca9a538181fc785553a4000785d73cc207922f63e8ce1112768cb1de7b673aed83a1e4a74592f1268d8e2a4e9e63d414b5d442bd0456d
Qx = 0xcc6fc032a846aaac25533eb033522824f94e670fa997ecef
Qy = 0xe25463ef77a029eccda8b294fd63dd694e38d223d30862f1
R = 0x066b1d07f3a40e679b620eda7f550842a35c18b80c5ebe06
S = 0xa0b0fb201e8f2df65e2c4508ef303bdc90d934016f16b2dc
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0x4366fcadf10d30d086911de30143da6f579527036937007b337f7282460eae5678b15cccda853193ea5fc4bc0a6b9d7a31128f27e1214988592827520b214eed5052f7775b750b0c6b15f145453ba3fee24a085d65287e10509eb5d5f602c440341376b95c24e5c4727d4b859bfe1483d20538acdd92c7997fa9c614f0f839d7
Qx = 0x955c908fe900a996f7e2089bee2f6376830f76a19135e753
Qy = 0xba0c42a91d3847de4a592a46dc3fdaf45a7cc709b90de520
R = 0x1f58ad77fc04c782815a1405b0925e72095d906cbf52a668
S = 0xf2e93758b3af75edf784f05a6761c9b9a6043c66b845b599
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0x543f8af57d750e33aa8565e0cae92bfa7a1ff78833093421c2942cadf9986670a5ff3244c02a8225e790fbf30ea84c74720abf99cfd10d02d34377c3d3b41269bea763384f372bb786b5846f58932defa68023136cd571863b304886e95e52e7877f445b9364b3f06f3c28da12707673fecb4b8071de06b6e0a3c87da160cef3
Qx = 0x31f7fa05576d78a949b24812d4383107a9a45bb5fccdd835
Qy = 0x8dc0eb65994a90f02b5e19bd18b32d61150746c09107e76b
R = 0xbe26d59e4e883dde7c286614a767b31e49ad88789d3a78ff
S = 0x8762ca831c1ce42df77893c9b03119428e7a9b819b619068
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0xd2e8454143ce281e609a9d748014dcebb9d0bc53adb02443a6aac2ffe6cb009f387c346ecb051791404f79e902ee333ad65e5c8cb38dc0d1d39a8dc90add5023572720e5b94b190d43dd0d7873397504c0c7aef2727e628eb6a74411f2e400c65670716cb4a815dc91cbbfeb7cfe8c929e93184c938af2c078584da045e8f8d1
Qx = 0x66aa8edbbdb5cf8e28ceb51b5bda891cae2df84819fe25c0
Qy = 0x0c6bc2f69030a7ce58d4a00e3b3349844784a13b8936f8da
R = 0xa4661e69b1734f4a71b788410a464b71e7ffe42334484f23
S = 0x738421cf5e049159d69c57a915143e226cac8355e149afe9
test_signature_validity( Msg, Qx, Qy, R, S, False )
Msg = 0x6660717144040f3e2f95a4e25b08a7079c702a8b29babad5a19a87654bc5c5afa261512a11b998a4fb36b5d8fe8bd942792ff0324b108120de86d63f65855e5461184fc96a0a8ffd2ce6d5dfb0230cbbdd98f8543e361b3205f5da3d500fdc8bac6db377d75ebef3cb8f4d1ff738071ad0938917889250b41dd1d98896ca06fb
Qx = 0xbcfacf45139b6f5f690a4c35a5fffa498794136a2353fc77
Qy = 0x6f4a6c906316a6afc6d98fe1f0399d056f128fe0270b0f22
R = 0x9db679a3dafe48f7ccad122933acfe9da0970b71c94c21c1
S = 0x984c2db99827576c0a41a5da41e07d8cc768bc82f18c9da9
test_signature_validity( Msg, Qx, Qy, R, S, False )
print_("Testing the example code:")
# Building a public/private key pair from the NIST Curve P-192:
g = generator_192
n = g.order()
# (random.SystemRandom is supposed to provide
# crypto-quality random numbers, but as Debian recently
# illustrated, a systems programmer can accidentally
# demolish this security, so in serious applications
# further precautions are appropriate.)
randrange = random.SystemRandom().randrange
secret = randrange( 1, n )
pubkey = Public_key( g, g * secret )
privkey = Private_key( pubkey, secret )
# Signing a hash value:
hash = randrange( 1, n )
signature = privkey.sign( hash, randrange( 1, n ) )
# Verifying a signature for a hash value:
if pubkey.verifies( hash, signature ):
print_("Demo verification succeeded.")
else:
raise TestFailure("*** Demo verification failed.")
if pubkey.verifies( hash-1, signature ):
raise TestFailure( "**** Demo verification failed to reject tampered hash.")
else:
print_("Demo verification correctly rejected tampered hash.")
if __name__ == "__main__":
__main__()

bin/python/ecdsa/ellipticcurve.py

@@ -0,0 +1,293 @@
#! /usr/bin/env python
#
# Implementation of elliptic curves, for cryptographic applications.
#
# This module doesn't provide any way to choose a random elliptic
# curve, nor to verify that an elliptic curve was chosen randomly,
# because one can simply use NIST's standard curves.
#
# Notes from X9.62-1998 (draft):
# Nomenclature:
# - Q is a public key.
# The "Elliptic Curve Domain Parameters" include:
# - q is the "field size", which in our case equals p.
# - p is a big prime.
# - G is a point of prime order (5.1.1.1).
# - n is the order of G (5.1.1.1).
# Public-key validation (5.2.2):
# - Verify that Q is not the point at infinity.
# - Verify that X_Q and Y_Q are in [0,p-1].
# - Verify that Q is on the curve.
# - Verify that nQ is the point at infinity.
# Signature generation (5.3):
# - Pick random k from [1,n-1].
# Signature checking (5.4.2):
# - Verify that r and s are in [1,n-1].
#
# Version of 2008.11.25.
#
# Revision history:
# 2005.12.31 - Initial version.
# 2008.11.25 - Change CurveFp.is_on to contains_point.
#
# Written in 2005 by Peter Pearson and placed in the public domain.
from __future__ import division
from .six import print_
from . import numbertheory
class CurveFp( object ):
"""Elliptic Curve over the field of integers modulo a prime."""
def __init__( self, p, a, b ):
"""The curve of points satisfying y^2 = x^3 + a*x + b (mod p)."""
self.__p = p
self.__a = a
self.__b = b
def p( self ):
return self.__p
def a( self ):
return self.__a
def b( self ):
return self.__b
def contains_point( self, x, y ):
"""Is the point (x,y) on this curve?"""
return ( y * y - ( x * x * x + self.__a * x + self.__b ) ) % self.__p == 0
class Point( object ):
"""A point on an elliptic curve. Altering x and y is forbidding,
but they can be read by the x() and y() methods."""
def __init__( self, curve, x, y, order = None ):
"""curve, x, y, order; order (optional) is the order of this point."""
self.__curve = curve
self.__x = x
self.__y = y
self.__order = order
# self.curve is allowed to be None only for INFINITY:
if self.__curve: assert self.__curve.contains_point( x, y )
if order: assert self * order == INFINITY
def __eq__( self, other ):
"""Return True if the points are identical, False otherwise."""
if self.__curve == other.__curve \
and self.__x == other.__x \
and self.__y == other.__y:
return True
else:
return False
def __add__( self, other ):
"""Add one point to another point."""
# X9.62 B.3:
if other == INFINITY: return self
if self == INFINITY: return other
assert self.__curve == other.__curve
if self.__x == other.__x:
if ( self.__y + other.__y ) % self.__curve.p() == 0:
return INFINITY
else:
return self.double()
p = self.__curve.p()
l = ( ( other.__y - self.__y ) * \
numbertheory.inverse_mod( other.__x - self.__x, p ) ) % p
x3 = ( l * l - self.__x - other.__x ) % p
y3 = ( l * ( self.__x - x3 ) - self.__y ) % p
return Point( self.__curve, x3, y3 )
def __mul__( self, other ):
"""Multiply a point by an integer."""
def leftmost_bit( x ):
assert x > 0
result = 1
while result <= x: result = 2 * result
return result // 2
e = other
if self.__order: e = e % self.__order
if e == 0: return INFINITY
if self == INFINITY: return INFINITY
assert e > 0
# From X9.62 D.3.2:
e3 = 3 * e
negative_self = Point( self.__curve, self.__x, -self.__y, self.__order )
i = leftmost_bit( e3 ) // 2
result = self
# print_("Multiplying %s by %d (e3 = %d):" % ( self, other, e3 ))
while i > 1:
result = result.double()
if ( e3 & i ) != 0 and ( e & i ) == 0: result = result + self
if ( e3 & i ) == 0 and ( e & i ) != 0: result = result + negative_self
# print_(". . . i = %d, result = %s" % ( i, result ))
i = i // 2
return result
def __rmul__( self, other ):
"""Multiply a point by an integer."""
return self * other
def __str__( self ):
if self == INFINITY: return "infinity"
return "(%d,%d)" % ( self.__x, self.__y )
def double( self ):
"""Return a new point that is twice the old."""
if self == INFINITY:
return INFINITY
# X9.62 B.3:
p = self.__curve.p()
a = self.__curve.a()
l = ( ( 3 * self.__x * self.__x + a ) * \
numbertheory.inverse_mod( 2 * self.__y, p ) ) % p
x3 = ( l * l - 2 * self.__x ) % p
y3 = ( l * ( self.__x - x3 ) - self.__y ) % p
return Point( self.__curve, x3, y3 )
def x( self ):
return self.__x
def y( self ):
return self.__y
def curve( self ):
return self.__curve
def order( self ):
return self.__order
# This one point is the Point At Infinity for all purposes:
INFINITY = Point( None, None, None )
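# A worked sketch on the toy curve y^2 = x^3 + x + 1 (mod 23) that the
# X9.62 tests below also use (helper not part of the module):
def _example_point_arithmetic():
    c = CurveFp(23, 1, 1)
    assert c.contains_point(3, 10)          # 10**2 == 3**3 + 3 + 1 (mod 23)
    p = Point(c, 3, 10)
    assert p + p == p.double() == Point(c, 7, 12)
    assert (2 * p).x() == 7                 # __rmul__ delegates to __mul__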
def __main__():
class FailedTest(Exception): pass
def test_add( c, x1, y1, x2, y2, x3, y3 ):
"""We expect that on curve c, (x1,y1) + (x2, y2 ) = (x3, y3)."""
p1 = Point( c, x1, y1 )
p2 = Point( c, x2, y2 )
p3 = p1 + p2
print_("%s + %s = %s" % ( p1, p2, p3 ), end=' ')
if p3.x() != x3 or p3.y() != y3:
raise FailedTest("Failure: should give (%d,%d)." % ( x3, y3 ))
else:
print_(" Good.")
def test_double( c, x1, y1, x3, y3 ):
"""We expect that on curve c, 2*(x1,y1) = (x3, y3)."""
p1 = Point( c, x1, y1 )
p3 = p1.double()
print_("%s doubled = %s" % ( p1, p3 ), end=' ')
if p3.x() != x3 or p3.y() != y3:
raise FailedTest("Failure: should give (%d,%d)." % ( x3, y3 ))
else:
print_(" Good.")
def test_double_infinity( c ):
"""We expect that on curve c, 2*INFINITY = INFINITY."""
p1 = INFINITY
p3 = p1.double()
print_("%s doubled = %s" % ( p1, p3 ), end=' ')
if p3.x() != INFINITY.x() or p3.y() != INFINITY.y():
raise FailedTest("Failure: should give (%d,%d)." % ( INFINITY.x(), INFINITY.y() ))
else:
print_(" Good.")
def test_multiply( c, x1, y1, m, x3, y3 ):
"""We expect that on curve c, m*(x1,y1) = (x3,y3)."""
p1 = Point( c, x1, y1 )
p3 = p1 * m
print_("%s * %d = %s" % ( p1, m, p3 ), end=' ')
if p3.x() != x3 or p3.y() != y3:
raise FailedTest("Failure: should give (%d,%d)." % ( x3, y3 ))
else:
print_(" Good.")
# A few tests from X9.62 B.3:
c = CurveFp( 23, 1, 1 )
test_add( c, 3, 10, 9, 7, 17, 20 )
test_double( c, 3, 10, 7, 12 )
test_add( c, 3, 10, 3, 10, 7, 12 ) # (Should just invoke double.)
test_multiply( c, 3, 10, 2, 7, 12 )
test_double_infinity(c)
# From X9.62 I.1 (p. 96):
g = Point( c, 13, 7, 7 )
check = INFINITY
for i in range( 7 + 1 ):
p = ( i % 7 ) * g
print_("%s * %d = %s, expected %s . . ." % ( g, i, p, check ), end=' ')
if p == check:
print_(" Good.")
else:
raise FailedTest("Bad.")
check = check + g
# NIST Curve P-192:
p = 6277101735386680763835789423207666416083908700390324961279
r = 6277101735386680763835789423176059013767194773182842284081
#s = 0x3045ae6fc8422f64ed579528d38120eae12196d5L
c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65
b = 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
Gx = 0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012
Gy = 0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811
c192 = CurveFp( p, -3, b )
p192 = Point( c192, Gx, Gy, r )
# Checking against some sample computations presented
# in X9.62:
d = 651056770906015076056810763456358567190100156695615665659
Q = d * p192
if Q.x() != 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5:
raise FailedTest("p192 * d came out wrong.")
else:
print_("p192 * d came out right.")
k = 6140507067065001063065065565667405560006161556565665656654
R = k * p192
if R.x() != 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD \
or R.y() != 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835:
raise FailedTest("k * p192 came out wrong.")
else:
print_("k * p192 came out right.")
u1 = 2563697409189434185194736134579731015366492496392189760599
u2 = 6266643813348617967186477710235785849136406323338782220568
temp = u1 * p192 + u2 * Q
if temp.x() != 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD \
or temp.y() != 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835:
raise FailedTest("u1 * p192 + u2 * Q came out wrong.")
else:
print_("u1 * p192 + u2 * Q came out right.")
if __name__ == "__main__":
__main__()

bin/python/ecdsa/keys.py

@@ -0,0 +1,283 @@
import binascii
from . import ecdsa
from . import der
from . import rfc6979
from .curves import NIST192p, find_curve
from .util import string_to_number, number_to_string, randrange
from .util import sigencode_string, sigdecode_string
from .util import oid_ecPublicKey, encoded_oid_ecPublicKey
from .six import PY3, b
from hashlib import sha1
class BadSignatureError(Exception):
pass
class BadDigestError(Exception):
pass
class VerifyingKey:
def __init__(self, _error__please_use_generate=None):
if not _error__please_use_generate:
raise TypeError("Please use SigningKey.generate() to construct me")
@classmethod
def from_public_point(klass, point, curve=NIST192p, hashfunc=sha1):
self = klass(_error__please_use_generate=True)
self.curve = curve
self.default_hashfunc = hashfunc
self.pubkey = ecdsa.Public_key(curve.generator, point)
self.pubkey.order = curve.order
return self
@classmethod
def from_string(klass, string, curve=NIST192p, hashfunc=sha1,
validate_point=True):
order = curve.order
assert len(string) == curve.verifying_key_length, \
(len(string), curve.verifying_key_length)
xs = string[:curve.baselen]
ys = string[curve.baselen:]
assert len(xs) == curve.baselen, (len(xs), curve.baselen)
assert len(ys) == curve.baselen, (len(ys), curve.baselen)
x = string_to_number(xs)
y = string_to_number(ys)
if validate_point:
assert ecdsa.point_is_valid(curve.generator, x, y)
from . import ellipticcurve
point = ellipticcurve.Point(curve.curve, x, y, order)
return klass.from_public_point(point, curve, hashfunc)
@classmethod
def from_pem(klass, string):
return klass.from_der(der.unpem(string))
@classmethod
def from_der(klass, string):
# [[oid_ecPublicKey,oid_curve], point_str_bitstring]
s1,empty = der.remove_sequence(string)
if empty != b(""):
raise der.UnexpectedDER("trailing junk after DER pubkey: %s" %
binascii.hexlify(empty))
s2,point_str_bitstring = der.remove_sequence(s1)
# s2 = oid_ecPublicKey,oid_curve
oid_pk, rest = der.remove_object(s2)
oid_curve, empty = der.remove_object(rest)
if empty != b(""):
raise der.UnexpectedDER("trailing junk after DER pubkey objects: %s" %
binascii.hexlify(empty))
assert oid_pk == oid_ecPublicKey, (oid_pk, oid_ecPublicKey)
curve = find_curve(oid_curve)
point_str, empty = der.remove_bitstring(point_str_bitstring)
if empty != b(""):
raise der.UnexpectedDER("trailing junk after pubkey pointstring: %s" %
binascii.hexlify(empty))
assert point_str.startswith(b("\x00\x04"))
return klass.from_string(point_str[2:], curve)
def to_string(self):
# VerifyingKey.from_string(vk.to_string()) == vk as long as the
# curves are the same: the curve itself is not included in the
# serialized form
order = self.pubkey.order
x_str = number_to_string(self.pubkey.point.x(), order)
y_str = number_to_string(self.pubkey.point.y(), order)
return x_str + y_str
def to_pem(self):
return der.topem(self.to_der(), "PUBLIC KEY")
def to_der(self):
order = self.pubkey.order
x_str = number_to_string(self.pubkey.point.x(), order)
y_str = number_to_string(self.pubkey.point.y(), order)
point_str = b("\x00\x04") + x_str + y_str
return der.encode_sequence(der.encode_sequence(encoded_oid_ecPublicKey,
self.curve.encoded_oid),
der.encode_bitstring(point_str))
def verify(self, signature, data, hashfunc=None, sigdecode=sigdecode_string):
hashfunc = hashfunc or self.default_hashfunc
digest = hashfunc(data).digest()
return self.verify_digest(signature, digest, sigdecode)
def verify_digest(self, signature, digest, sigdecode=sigdecode_string):
if len(digest) > self.curve.baselen:
raise BadDigestError("this curve (%s) is too short "
"for your digest (%d)" % (self.curve.name,
8*len(digest)))
number = string_to_number(digest)
r, s = sigdecode(signature, self.pubkey.order)
sig = ecdsa.Signature(r, s)
if self.pubkey.verifies(number, sig):
return True
raise BadSignatureError
class SigningKey:
def __init__(self, _error__please_use_generate=None):
if not _error__please_use_generate:
raise TypeError("Please use SigningKey.generate() to construct me")
@classmethod
def generate(klass, curve=NIST192p, entropy=None, hashfunc=sha1):
secexp = randrange(curve.order, entropy)
return klass.from_secret_exponent(secexp, curve, hashfunc)
# to create a signing key from a short (arbitrary-length) seed, convert
# that seed into an integer with something like
# secexp=util.randrange_from_seed__X(seed, curve.order), and then pass
# that integer into SigningKey.from_secret_exponent(secexp, curve)
@classmethod
def from_secret_exponent(klass, secexp, curve=NIST192p, hashfunc=sha1):
self = klass(_error__please_use_generate=True)
self.curve = curve
self.default_hashfunc = hashfunc
self.baselen = curve.baselen
n = curve.order
assert 1 <= secexp < n
pubkey_point = curve.generator*secexp
pubkey = ecdsa.Public_key(curve.generator, pubkey_point)
pubkey.order = n
self.verifying_key = VerifyingKey.from_public_point(pubkey_point, curve,
hashfunc)
self.privkey = ecdsa.Private_key(pubkey, secexp)
self.privkey.order = n
return self
@classmethod
def from_string(klass, string, curve=NIST192p, hashfunc=sha1):
assert len(string) == curve.baselen, (len(string), curve.baselen)
secexp = string_to_number(string)
return klass.from_secret_exponent(secexp, curve, hashfunc)
@classmethod
def from_pem(klass, string, hashfunc=sha1):
# the privkey pem file has two sections: "EC PARAMETERS" and "EC
# PRIVATE KEY". The first is redundant.
if PY3 and isinstance(string, str):
string = string.encode()
privkey_pem = string[string.index(b("-----BEGIN EC PRIVATE KEY-----")):]
return klass.from_der(der.unpem(privkey_pem), hashfunc)
@classmethod
def from_der(klass, string, hashfunc=sha1):
# SEQ([int(1), octetstring(privkey),cont[0], oid(secp224r1),
# cont[1],bitstring])
s, empty = der.remove_sequence(string)
if empty != b(""):
raise der.UnexpectedDER("trailing junk after DER privkey: %s" %
binascii.hexlify(empty))
one, s = der.remove_integer(s)
if one != 1:
raise der.UnexpectedDER("expected '1' at start of DER privkey,"
" got %d" % one)
privkey_str, s = der.remove_octet_string(s)
tag, curve_oid_str, s = der.remove_constructed(s)
if tag != 0:
raise der.UnexpectedDER("expected tag 0 in DER privkey,"
" got %d" % tag)
curve_oid, empty = der.remove_object(curve_oid_str)
if empty != b(""):
raise der.UnexpectedDER("trailing junk after DER privkey "
"curve_oid: %s" % binascii.hexlify(empty))
curve = find_curve(curve_oid)
# we don't actually care about the following fields
#
#tag, pubkey_bitstring, s = der.remove_constructed(s)
#if tag != 1:
# raise der.UnexpectedDER("expected tag 1 in DER privkey, got %d"
# % tag)
#pubkey_str = der.remove_bitstring(pubkey_bitstring)
#if empty != "":
# raise der.UnexpectedDER("trailing junk after DER privkey "
# "pubkeystr: %s" % binascii.hexlify(empty))
# our from_string method likes fixed-length privkey strings
if len(privkey_str) < curve.baselen:
privkey_str = b("\x00")*(curve.baselen-len(privkey_str)) + privkey_str
return klass.from_string(privkey_str, curve, hashfunc)
def to_string(self):
secexp = self.privkey.secret_multiplier
s = number_to_string(secexp, self.privkey.order)
return s
def to_pem(self):
# TODO: "BEGIN ECPARAMETERS"
return der.topem(self.to_der(), "EC PRIVATE KEY")
def to_der(self):
# SEQ([int(1), octetstring(privkey),cont[0], oid(secp224r1),
# cont[1],bitstring])
encoded_vk = b("\x00\x04") + self.get_verifying_key().to_string()
return der.encode_sequence(der.encode_integer(1),
der.encode_octet_string(self.to_string()),
der.encode_constructed(0, self.curve.encoded_oid),
der.encode_constructed(1, der.encode_bitstring(encoded_vk)),
)
def get_verifying_key(self):
return self.verifying_key
def sign_deterministic(self, data, hashfunc=None, sigencode=sigencode_string):
hashfunc = hashfunc or self.default_hashfunc
digest = hashfunc(data).digest()
return self.sign_digest_deterministic(digest, hashfunc=hashfunc, sigencode=sigencode)
def sign_digest_deterministic(self, digest, hashfunc=None, sigencode=sigencode_string):
"""
Calculates 'k' from data itself, removing the need for strong
random generator and producing deterministic (reproducible) signatures.
See RFC 6979 for more details.
"""
secexp = self.privkey.secret_multiplier
k = rfc6979.generate_k(
self.curve.generator.order(), secexp, hashfunc, digest)
return self.sign_digest(digest, sigencode=sigencode, k=k)
def sign(self, data, entropy=None, hashfunc=None, sigencode=sigencode_string, k=None):
"""
hashfunc= should behave like hashlib.sha1 . The output length of the
hash (in bytes) must not be longer than the length of the curve order
(rounded up to the nearest byte), so using SHA256 with nist256p is
ok, but SHA256 with nist192p is not. (In the 2**-96ish unlikely event
of a hash output larger than the curve order, the hash will
effectively be wrapped mod n).
Use hashfunc=hashlib.sha1 to match openssl's -ecdsa-with-SHA1 mode,
or hashfunc=hashlib.sha256 for openssl-1.0.0's -ecdsa-with-SHA256.
"""
hashfunc = hashfunc or self.default_hashfunc
h = hashfunc(data).digest()
return self.sign_digest(h, entropy, sigencode, k)
def sign_digest(self, digest, entropy=None, sigencode=sigencode_string, k=None):
if len(digest) > self.curve.baselen:
raise BadDigestError("this curve (%s) is too short "
"for your digest (%d)" % (self.curve.name,
8*len(digest)))
number = string_to_number(digest)
r, s = self.sign_number(number, entropy, k)
return sigencode(r, s, self.privkey.order)
def sign_number(self, number, entropy=None, k=None):
# returns a pair of numbers
order = self.privkey.order
# privkey.sign() may raise RuntimeError in the amazingly unlikely
# (2**-192) event that r=0 or s=0, because that would leak the key.
# We could re-try with a different 'k', but we couldn't test that
# code, so I choose to allow the signature to fail instead.
# If k is set, it is used directly. In other cases
# it is generated using entropy function
if k is not None:
_k = k
else:
_k = randrange(order, entropy)
assert 1 <= _k < order
sig = self.privkey.sign(number, _k)
return sig.r, sig.s
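# End-to-end sketch (helper not part of the module), using the default
# NIST192p curve and SHA-1 digest assumed throughout this file:
def _example_sign_and_verify():
    sk = SigningKey.generate()
    vk = sk.get_verifying_key()
    sig = sk.sign(b("message"))
    assert vk.verify(sig, b("message"))  # raises BadSignatureError on mismatch
    # RFC 6979 signatures need no entropy source and are reproducible:
    assert sk.sign_deterministic(b("message")) == \
           sk.sign_deterministic(b("message"))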

bin/python/ecdsa/numbertheory.py

@@ -0,0 +1,613 @@
#! /usr/bin/env python
#
# Provide some simple capabilities from number theory.
#
# Version of 2008.11.14.
#
# Written in 2005 and 2006 by Peter Pearson and placed in the public domain.
# Revision history:
# 2008.11.14: Use pow( base, exponent, modulus ) for modular_exp.
# Make gcd and lcm accept arbitrarily many arguments.
from __future__ import division
from .six import print_, integer_types
from .six.moves import reduce
import math
class Error( Exception ):
"""Base class for exceptions in this module."""
pass
class SquareRootError( Error ):
pass
class NegativeExponentError( Error ):
pass
def modular_exp( base, exponent, modulus ):
"Raise base to exponent, reducing by modulus"
if exponent < 0:
raise NegativeExponentError( "Negative exponents (%d) not allowed" \
% exponent )
return pow( base, exponent, modulus )
# result = 1L
# x = exponent
# b = base + 0L
# while x > 0:
# if x % 2 > 0: result = (result * b) % modulus
# x = x // 2
# b = ( b * b ) % modulus
# return result
def polynomial_reduce_mod( poly, polymod, p ):
"""Reduce poly by polymod, integer arithmetic modulo p.
Polynomials are represented as lists of coefficients
of increasing powers of x."""
# This module has been tested only by extensive use
# in calculating modular square roots.
# Just to make this easy, require a monic polynomial:
assert polymod[-1] == 1
assert len( polymod ) > 1
while len( poly ) >= len( polymod ):
if poly[-1] != 0:
for i in range( 2, len( polymod ) + 1 ):
poly[-i] = ( poly[-i] - poly[-1] * polymod[-i] ) % p
poly = poly[0:-1]
return poly
def polynomial_multiply_mod( m1, m2, polymod, p ):
"""Polynomial multiplication modulo a polynomial over ints mod p.
Polynomials are represented as lists of coefficients
of increasing powers of x."""
# This is just a seat-of-the-pants implementation.
# This module has been tested only by extensive use
# in calculating modular square roots.
# Initialize the product to zero:
prod = ( len( m1 ) + len( m2 ) - 1 ) * [0]
# Add together all the cross-terms:
for i in range( len( m1 ) ):
for j in range( len( m2 ) ):
prod[i+j] = ( prod[i+j] + m1[i] * m2[j] ) % p
return polynomial_reduce_mod( prod, polymod, p )
def polynomial_exp_mod( base, exponent, polymod, p ):
"""Polynomial exponentiation modulo a polynomial over ints mod p.
Polynomials are represented as lists of coefficients
of increasing powers of x."""
# Based on the Handbook of Applied Cryptography, algorithm 2.227.
# This module has been tested only by extensive use
# in calculating modular square roots.
assert exponent < p
if exponent == 0: return [ 1 ]
G = base
k = exponent
if k%2 == 1: s = G
else: s = [ 1 ]
while k > 1:
k = k // 2
G = polynomial_multiply_mod( G, G, polymod, p )
if k%2 == 1: s = polynomial_multiply_mod( G, s, polymod, p )
return s
def jacobi( a, n ):
"""Jacobi symbol"""
# Based on the Handbook of Applied Cryptography (HAC), algorithm 2.149.
# This function has been tested by comparison with a small
# table printed in HAC, and by extensive use in calculating
# modular square roots.
assert n >= 3
assert n%2 == 1
a = a % n
if a == 0: return 0
if a == 1: return 1
a1, e = a, 0
while a1%2 == 0:
a1, e = a1//2, e+1
if e%2 == 0 or n%8 == 1 or n%8 == 7: s = 1
else: s = -1
if a1 == 1: return s
if n%4 == 3 and a1%4 == 3: s = -s
return s * jacobi( n % a1, a1 )
def square_root_mod_prime( a, p ):
"""Modular square root of a, mod p, p prime."""
# Based on the Handbook of Applied Cryptography, algorithms 3.34 to 3.39.
# This module has been tested for all values in [0,p-1] for
# every prime p from 3 to 1229.
assert 0 <= a < p
assert 1 < p
if a == 0: return 0
if p == 2: return a
jac = jacobi( a, p )
if jac == -1: raise SquareRootError( "%d has no square root modulo %d" \
% ( a, p ) )
if p % 4 == 3: return modular_exp( a, (p+1)//4, p )
if p % 8 == 5:
d = modular_exp( a, (p-1)//4, p )
if d == 1: return modular_exp( a, (p+3)//8, p )
if d == p-1: return ( 2 * a * modular_exp( 4*a, (p-5)//8, p ) ) % p
raise RuntimeError("Shouldn't get here.")
for b in range( 2, p ):
if jacobi( b*b-4*a, p ) == -1:
f = ( a, -b, 1 )
ff = polynomial_exp_mod( ( 0, 1 ), (p+1)//2, f, p )
assert ff[1] == 0
return ff[0]
raise RuntimeError("No b found.")
def inverse_mod( a, m ):
"""Inverse of a mod m."""
if a < 0 or m <= a: a = a % m
# From Ferguson and Schneier, roughly:
c, d = a, m
uc, vc, ud, vd = 1, 0, 0, 1
while c != 0:
q, c, d = divmod( d, c ) + ( c, )
uc, vc, ud, vd = ud - q*uc, vd - q*vc, uc, vc
# At this point, d is the GCD, and ud*a+vd*m = d.
# If d == 1, this means that ud is an inverse.
assert d == 1
if ud > 0: return ud
else: return ud + m
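# Self-check (a sketch; 2003 is prime, so the inverse of 7 exists; on
# Python 3.8+ the same value is also available as pow(7, -1, 2003)):
assert (7 * inverse_mod(7, 2003)) % 2003 == 1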
def gcd2(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
a, b = b%a, a
return b
def gcd( *a ):
"""Greatest common divisor.
Usage: gcd( [ 2, 4, 6 ] )
or: gcd( 2, 4, 6 )
"""
if len( a ) > 1: return reduce( gcd2, a )
if hasattr( a[0], "__iter__" ): return reduce( gcd2, a[0] )
return a[0]
def lcm2(a,b):
"""Least common multiple of two integers."""
return (a*b)//gcd(a,b)
def lcm( *a ):
"""Least common multiple.
Usage: lcm( [ 3, 4, 5 ] )
or: lcm( 3, 4, 5 )
"""
if len( a ) > 1: return reduce( lcm2, a )
if hasattr( a[0], "__iter__" ): return reduce( lcm2, a[0] )
return a[0]
def factorization( n ):
"""Decompose n into a list of (prime,exponent) pairs."""
assert isinstance( n, integer_types )
if n < 2: return []
result = []
d = 2
# Test the small primes:
for d in smallprimes:
if d > n: break
q, r = divmod( n, d )
if r == 0:
count = 1
while d <= n:
n = q
q, r = divmod( n, d )
if r != 0: break
count = count + 1
result.append( ( d, count ) )
# If n is still greater than the last of our small primes,
# it may require further work:
if n > smallprimes[-1]:
if is_prime( n ): # If what's left is prime, it's easy:
result.append( ( n, 1 ) )
else: # Ugh. Search stupidly for a divisor:
d = smallprimes[-1]
while 1:
d = d + 2 # Try the next divisor.
q, r = divmod( n, d )
if q < d: break # n < d*d means we're done, n = 1 or prime.
if r == 0: # d divides n. How many times?
count = 1
n = q
while d <= n: # As long as d might still divide n,
q, r = divmod( n, d ) # see if it does.
if r != 0: break
n = q # It does. Reduce n, increase count.
count = count + 1
result.append( ( d, count ) )
if n > 1: result.append( ( n, 1 ) )
return result
def phi( n ):
"""Return the Euler totient function of n."""
assert isinstance( n, integer_types )
if n < 3: return 1
result = 1
ff = factorization( n )
for f in ff:
e = f[1]
if e > 1:
result = result * f[0] ** (e-1) * ( f[0] - 1 )
else:
result = result * ( f[0] - 1 )
return result
def carmichael( n ):
"""Return Carmichael function of n.
Carmichael(n) is the smallest integer x such that
m**x = 1 mod n for all m relatively prime to n.
"""
return carmichael_of_factorized( factorization( n ) )
def carmichael_of_factorized( f_list ):
"""Return the Carmichael function of a number that is
represented as a list of (prime,exponent) pairs.
"""
if len( f_list ) < 1: return 1
result = carmichael_of_ppower( f_list[0] )
for i in range( 1, len( f_list ) ):
result = lcm( result, carmichael_of_ppower( f_list[i] ) )
return result
def carmichael_of_ppower( pp ):
"""Carmichael function of the given power of the given prime.
"""
p, a = pp
if p == 2 and a > 2: return 2**(a-2)
else: return (p-1) * p**(a-1)
def order_mod( x, m ):
"""Return the order of x in the multiplicative group mod m.
"""
# Warning: this implementation is not very clever, and will
# take a long time if m is very large.
if m <= 1: return 0
assert gcd( x, m ) == 1
z = x
result = 1
while z != 1:
z = ( z * x ) % m
result = result + 1
return result
def largest_factor_relatively_prime( a, b ):
"""Return the largest factor of a relatively prime to b.
"""
while 1:
d = gcd( a, b )
if d <= 1: break
b = d
while 1:
q, r = divmod( a, d )
if r > 0:
break
a = q
return a
def kinda_order_mod( x, m ):
"""Return the order of x in the multiplicative group mod m',
where m' is the largest factor of m relatively prime to x.
"""
return order_mod( x, largest_factor_relatively_prime( m, x ) )
def is_prime( n ):
"""Return True if x is prime, False otherwise.
We use the Miller-Rabin test, as given in Menezes et al. p. 138.
This test is not exact: there are composite values n for which
it returns True.
In testing the odd numbers from 10000001 to 19999999,
about 66 composites got past the first test,
5 got past the second test, and none got past the third.
Since factors of 2, 3, 5, 7, and 11 were detected during
preliminary screening, the number of numbers tested by
Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
= 4.57 million.
"""
# (This is used to study the risk of false positives:)
global miller_rabin_test_count
miller_rabin_test_count = 0
if n <= smallprimes[-1]:
if n in smallprimes: return True
else: return False
if gcd( n, 2*3*5*7*11 ) != 1: return False
# Choose a number of iterations sufficient to reduce the
# probability of accepting a composite below 2**-80
# (from Menezes et al. Table 4.4):
t = 40
n_bits = 1 + int( math.log( n, 2 ) )
for k, tt in ( ( 100, 27 ),
( 150, 18 ),
( 200, 15 ),
( 250, 12 ),
( 300, 9 ),
( 350, 8 ),
( 400, 7 ),
( 450, 6 ),
( 550, 5 ),
( 650, 4 ),
( 850, 3 ),
( 1300, 2 ),
):
if n_bits < k: break
t = tt
# Run the test t times:
s = 0
r = n - 1
while ( r % 2 ) == 0:
s = s + 1
r = r // 2
for i in range( t ):
a = smallprimes[ i ]
y = modular_exp( a, r, n )
if y != 1 and y != n-1:
j = 1
while j <= s - 1 and y != n - 1:
y = modular_exp( y, 2, n )
if y == 1:
miller_rabin_test_count = i + 1
return False
j = j + 1
if y != n-1:
miller_rabin_test_count = i + 1
return False
return True
def next_prime( starting_value ):
"Return the smallest prime larger than the starting value."
if starting_value < 2: return 2
result = ( starting_value + 1 ) | 1
while not is_prime( result ): result = result + 2
return result
smallprimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
151, 157, 163, 167, 173, 179, 181, 191, 193, 197,
199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
911, 919, 929, 937, 941, 947, 953, 967, 971, 977,
983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033,
1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,
1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163,
1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229]
miller_rabin_test_count = 0
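# Self-checks for the primality helpers above (1001 = 7 * 11 * 13, so it is
# caught by the small-prime GCD screen in is_prime):
assert is_prime(1009)
assert not is_prime(1001)
assert next_prime(1009) == 1013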
def __main__():
# Making sure locally defined exceptions work:
# p = modular_exp( 2, -2, 3 )
# p = square_root_mod_prime( 2, 3 )
print_("Testing gcd...")
assert gcd( 3*5*7, 3*5*11, 3*5*13 ) == 3*5
assert gcd( [ 3*5*7, 3*5*11, 3*5*13 ] ) == 3*5
assert gcd( 3 ) == 3
print_("Testing lcm...")
assert lcm( 3, 5*3, 7*3 ) == 3*5*7
assert lcm( [ 3, 5*3, 7*3 ] ) == 3*5*7
assert lcm( 3 ) == 3
print_("Testing next_prime...")
bigprimes = ( 999671,
999683,
999721,
999727,
999749,
999763,
999769,
999773,
999809,
999853,
999863,
999883,
999907,
999917,
999931,
999953,
999959,
999961,
999979,
999983 )
for i in range( len( bigprimes ) - 1 ):
assert next_prime( bigprimes[i] ) == bigprimes[ i+1 ]
error_tally = 0
# Test the square_root_mod_prime function:
for p in smallprimes:
print_("Testing square_root_mod_prime for modulus p = %d." % p)
squares = []
for root in range( 0, 1+p//2 ):
sq = ( root * root ) % p
squares.append( sq )
calculated = square_root_mod_prime( sq, p )
if ( calculated * calculated ) % p != sq:
error_tally = error_tally + 1
print_("Failed to find %d as sqrt( %d ) mod %d. Said %d." % \
( root, sq, p, calculated ))
for nonsquare in range( 0, p ):
if nonsquare not in squares:
try:
calculated = square_root_mod_prime( nonsquare, p )
except SquareRootError:
pass
else:
error_tally = error_tally + 1
print_("Failed to report no root for sqrt( %d ) mod %d." % \
( nonsquare, p ))
# Test the jacobi function:
for m in range( 3, 400, 2 ):
print_("Testing jacobi for modulus m = %d." % m)
if is_prime( m ):
squares = []
for root in range( 1, m ):
if jacobi( root * root, m ) != 1:
error_tally = error_tally + 1
print_("jacobi( %d * %d, %d ) != 1" % ( root, root, m ))
squares.append( root * root % m )
for i in range( 1, m ):
if not i in squares:
if jacobi( i, m ) != -1:
error_tally = error_tally + 1
print_("jacobi( %d, %d ) != -1" % ( i, m ))
else: # m is not prime.
f = factorization( m )
for a in range( 1, m ):
c = 1
for i in f:
c = c * jacobi( a, i[0] ) ** i[1]
if c != jacobi( a, m ):
error_tally = error_tally + 1
print_("%d != jacobi( %d, %d )" % ( c, a, m ))
# Test the inverse_mod function:
print_("Testing inverse_mod . . .")
import random
n_tests = 0
for i in range( 100 ):
m = random.randint( 20, 10000 )
for j in range( 100 ):
a = random.randint( 1, m-1 )
if gcd( a, m ) == 1:
n_tests = n_tests + 1
inv = inverse_mod( a, m )
if inv <= 0 or inv >= m or ( a * inv ) % m != 1:
error_tally = error_tally + 1
print_("%d = inverse_mod( %d, %d ) is wrong." % ( inv, a, m ))
assert n_tests > 1000
print_(n_tests, " tests of inverse_mod completed.")
class FailedTest(Exception): pass
print_(error_tally, "errors detected.")
if error_tally != 0:
raise FailedTest("%d errors detected" % error_tally)
if __name__ == '__main__':
__main__()

103
bin/python/ecdsa/rfc6979.py Normal file
View File

@@ -0,0 +1,103 @@
'''
RFC 6979:
Deterministic Usage of the Digital Signature Algorithm (DSA) and
Elliptic Curve Digital Signature Algorithm (ECDSA)
http://tools.ietf.org/html/rfc6979
Many thanks to Coda Hale for his implementation in the Go language:
https://github.com/codahale/rfc6979
'''
import hmac
from binascii import hexlify
from .util import number_to_string, number_to_string_crop
from .six import b
try:
bin(0)
except NameError:
binmap = {"0": "0000", "1": "0001", "2": "0010", "3": "0011",
"4": "0100", "5": "0101", "6": "0110", "7": "0111",
"8": "1000", "9": "1001", "a": "1010", "b": "1011",
"c": "1100", "d": "1101", "e": "1110", "f": "1111"}
def bin(value): # for python2.5
v = "".join(binmap[x] for x in "%x"%abs(value)).lstrip("0")
if value < 0:
return "-0b" + v
return "0b" + v
def bit_length(num):
# http://docs.python.org/dev/library/stdtypes.html#int.bit_length
s = bin(num) # binary representation: bin(-37) --> '-0b100101'
s = s.lstrip('-0b') # remove leading zeros and minus sign
return len(s) # len('100101') --> 6
def bits2int(data, qlen):
x = int(hexlify(data), 16)
l = len(data) * 8
if l > qlen:
return x >> (l-qlen)
return x
def bits2octets(data, order):
z1 = bits2int(data, bit_length(order))
z2 = z1 - order
if z2 < 0:
z2 = z1
return number_to_string_crop(z2, order)
# https://tools.ietf.org/html/rfc6979#section-3.2
def generate_k(order, secexp, hash_func, data):
'''
order - order of the DSA generator used in the signature
secexp - secure exponent (private key) in numeric form
hash_func - reference to the same hash function used for generating hash
data - hash in binary form of the signing data
'''
qlen = bit_length(order)
holen = hash_func().digest_size
rolen = (qlen + 7) // 8  # integer division (plain '/' would give a float on Python 3)
bx = number_to_string(secexp, order) + bits2octets(data, order)
# Step B
v = b('\x01') * holen
# Step C
k = b('\x00') * holen
# Step D
k = hmac.new(k, v+b('\x00')+bx, hash_func).digest()
# Step E
v = hmac.new(k, v, hash_func).digest()
# Step F
k = hmac.new(k, v+b('\x01')+bx, hash_func).digest()
# Step G
v = hmac.new(k, v, hash_func).digest()
# Step H
while True:
# Step H1
t = b('')
# Step H2
while len(t) < rolen:
v = hmac.new(k, v, hash_func).digest()
t += v
# Step H3
secret = bits2int(t, qlen)
if secret >= 1 and secret < order:
return secret
k = hmac.new(k, v+b('\x00'), hash_func).digest()
v = hmac.new(k, v, hash_func).digest()
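# Known-answer self-check (a sketch): the SECP256k1 vector that is pinned
# again in the test suite later in this diff (test_SECP256k1_3). The
# constant below is the secp256k1 group order.
from hashlib import sha256 as _sha256
_order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
_k = generate_k(_order, 0x1, _sha256, _sha256(b("Satoshi Nakamoto")).digest())
assert _k == 0x8F8A276C19F4149656B280621E358CCE24F5F52542772691EE69063B74F15D15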

394
bin/python/ecdsa/six.py Normal file
View File

@@ -0,0 +1,394 @@
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2012 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
if isinstance(s, unicode):
return s
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})

View File

@@ -0,0 +1,663 @@
from __future__ import with_statement, division
import unittest
import os
import time
import shutil
import subprocess
from binascii import hexlify, unhexlify
from hashlib import sha1, sha256, sha512
from .six import b, print_, binary_type
from .keys import SigningKey, VerifyingKey
from .keys import BadSignatureError
from . import util
from .util import sigencode_der, sigencode_strings
from .util import sigdecode_der, sigdecode_strings
from .curves import Curve, UnknownCurveError
from .curves import NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1
from .ellipticcurve import Point
from . import der
from . import rfc6979
class SubprocessError(Exception):
pass
def run_openssl(cmd):
OPENSSL = "openssl"
p = subprocess.Popen([OPENSSL] + cmd.split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, ignored = p.communicate()
if p.returncode != 0:
raise SubprocessError("cmd '%s %s' failed: rc=%s, stdout/err was %s" %
(OPENSSL, cmd, p.returncode, stdout))
return stdout.decode()
BENCH = False
class ECDSA(unittest.TestCase):
def test_basic(self):
priv = SigningKey.generate()
pub = priv.get_verifying_key()
data = b("blahblah")
sig = priv.sign(data)
self.assertTrue(pub.verify(sig, data))
self.assertRaises(BadSignatureError, pub.verify, sig, data+b("bad"))
pub2 = VerifyingKey.from_string(pub.to_string())
self.assertTrue(pub2.verify(sig, data))
def test_deterministic(self):
data = b("blahblah")
secexp = int("9d0219792467d7d37b4d43298a7d0c05", 16)
priv = SigningKey.from_secret_exponent(secexp, SECP256k1, sha256)
pub = priv.get_verifying_key()
k = rfc6979.generate_k(
SECP256k1.generator.order(), secexp, sha256, sha256(data).digest())
sig1 = priv.sign(data, k=k)
self.assertTrue(pub.verify(sig1, data))
sig2 = priv.sign(data, k=k)
self.assertTrue(pub.verify(sig2, data))
sig3 = priv.sign_deterministic(data, sha256)
self.assertTrue(pub.verify(sig3, data))
self.assertEqual(sig1, sig2)
self.assertEqual(sig1, sig3)
def test_bad_usage(self):
# sk=SigningKey() is wrong
self.assertRaises(TypeError, SigningKey)
self.assertRaises(TypeError, VerifyingKey)
def test_lengths(self):
default = NIST192p
priv = SigningKey.generate()
pub = priv.get_verifying_key()
self.assertEqual(len(pub.to_string()), default.verifying_key_length)
sig = priv.sign(b("data"))
self.assertEqual(len(sig), default.signature_length)
if BENCH:
print_()
for curve in (NIST192p, NIST224p, NIST256p, NIST384p, NIST521p):
start = time.time()
priv = SigningKey.generate(curve=curve)
pub1 = priv.get_verifying_key()
keygen_time = time.time() - start
pub2 = VerifyingKey.from_string(pub1.to_string(), curve)
self.assertEqual(pub1.to_string(), pub2.to_string())
self.assertEqual(len(pub1.to_string()),
curve.verifying_key_length)
start = time.time()
sig = priv.sign(b("data"))
sign_time = time.time() - start
self.assertEqual(len(sig), curve.signature_length)
if BENCH:
start = time.time()
pub1.verify(sig, b("data"))
verify_time = time.time() - start
print_("%s: siglen=%d, keygen=%0.3fs, sign=%0.3f, verify=%0.3f" \
% (curve.name, curve.signature_length,
keygen_time, sign_time, verify_time))
def test_serialize(self):
seed = b("secret")
curve = NIST192p
secexp1 = util.randrange_from_seed__trytryagain(seed, curve.order)
secexp2 = util.randrange_from_seed__trytryagain(seed, curve.order)
self.assertEqual(secexp1, secexp2)
priv1 = SigningKey.from_secret_exponent(secexp1, curve)
priv2 = SigningKey.from_secret_exponent(secexp2, curve)
self.assertEqual(hexlify(priv1.to_string()),
hexlify(priv2.to_string()))
self.assertEqual(priv1.to_pem(), priv2.to_pem())
pub1 = priv1.get_verifying_key()
pub2 = priv2.get_verifying_key()
data = b("data")
sig1 = priv1.sign(data)
sig2 = priv2.sign(data)
self.assertTrue(pub1.verify(sig1, data))
self.assertTrue(pub2.verify(sig1, data))
self.assertTrue(pub1.verify(sig2, data))
self.assertTrue(pub2.verify(sig2, data))
self.assertEqual(hexlify(pub1.to_string()),
hexlify(pub2.to_string()))
def test_nonrandom(self):
s = b("all the entropy in the entire world, compressed into one line")
def not_much_entropy(numbytes):
return s[:numbytes]
# we control the entropy source, these two keys should be identical:
priv1 = SigningKey.generate(entropy=not_much_entropy)
priv2 = SigningKey.generate(entropy=not_much_entropy)
self.assertEqual(hexlify(priv1.get_verifying_key().to_string()),
hexlify(priv2.get_verifying_key().to_string()))
# likewise, signatures should be identical. Obviously you'd never
# want to do this with keys you care about, because the secrecy of
# the private key depends upon using different random numbers for
# each signature
sig1 = priv1.sign(b("data"), entropy=not_much_entropy)
sig2 = priv2.sign(b("data"), entropy=not_much_entropy)
self.assertEqual(hexlify(sig1), hexlify(sig2))
def assertTruePrivkeysEqual(self, priv1, priv2):
self.assertEqual(priv1.privkey.secret_multiplier,
priv2.privkey.secret_multiplier)
self.assertEqual(priv1.privkey.public_key.generator,
priv2.privkey.public_key.generator)
def failIfPrivkeysEqual(self, priv1, priv2):
self.failIfEqual(priv1.privkey.secret_multiplier,
priv2.privkey.secret_multiplier)
def test_privkey_creation(self):
s = b("all the entropy in the entire world, compressed into one line")
def not_much_entropy(numbytes):
return s[:numbytes]
priv1 = SigningKey.generate()
self.assertEqual(priv1.baselen, NIST192p.baselen)
priv1 = SigningKey.generate(curve=NIST224p)
self.assertEqual(priv1.baselen, NIST224p.baselen)
priv1 = SigningKey.generate(entropy=not_much_entropy)
self.assertEqual(priv1.baselen, NIST192p.baselen)
priv2 = SigningKey.generate(entropy=not_much_entropy)
self.assertEqual(priv2.baselen, NIST192p.baselen)
self.assertTruePrivkeysEqual(priv1, priv2)
priv1 = SigningKey.from_secret_exponent(secexp=3)
self.assertEqual(priv1.baselen, NIST192p.baselen)
priv2 = SigningKey.from_secret_exponent(secexp=3)
self.assertTruePrivkeysEqual(priv1, priv2)
priv1 = SigningKey.from_secret_exponent(secexp=4, curve=NIST224p)
self.assertEqual(priv1.baselen, NIST224p.baselen)
def test_privkey_strings(self):
priv1 = SigningKey.generate()
s1 = priv1.to_string()
self.assertEqual(type(s1), binary_type)
self.assertEqual(len(s1), NIST192p.baselen)
priv2 = SigningKey.from_string(s1)
self.assertTruePrivkeysEqual(priv1, priv2)
s1 = priv1.to_pem()
self.assertEqual(type(s1), binary_type)
self.assertTrue(s1.startswith(b("-----BEGIN EC PRIVATE KEY-----")))
self.assertTrue(s1.strip().endswith(b("-----END EC PRIVATE KEY-----")))
priv2 = SigningKey.from_pem(s1)
self.assertTruePrivkeysEqual(priv1, priv2)
s1 = priv1.to_der()
self.assertEqual(type(s1), binary_type)
priv2 = SigningKey.from_der(s1)
self.assertTruePrivkeysEqual(priv1, priv2)
priv1 = SigningKey.generate(curve=NIST256p)
s1 = priv1.to_pem()
self.assertEqual(type(s1), binary_type)
self.assertTrue(s1.startswith(b("-----BEGIN EC PRIVATE KEY-----")))
self.assertTrue(s1.strip().endswith(b("-----END EC PRIVATE KEY-----")))
priv2 = SigningKey.from_pem(s1)
self.assertTruePrivkeysEqual(priv1, priv2)
s1 = priv1.to_der()
self.assertEqual(type(s1), binary_type)
priv2 = SigningKey.from_der(s1)
self.assertTruePrivkeysEqual(priv1, priv2)
def assertTruePubkeysEqual(self, pub1, pub2):
self.assertEqual(pub1.pubkey.point, pub2.pubkey.point)
self.assertEqual(pub1.pubkey.generator, pub2.pubkey.generator)
self.assertEqual(pub1.curve, pub2.curve)
def test_pubkey_strings(self):
priv1 = SigningKey.generate()
pub1 = priv1.get_verifying_key()
s1 = pub1.to_string()
self.assertEqual(type(s1), binary_type)
self.assertEqual(len(s1), NIST192p.verifying_key_length)
pub2 = VerifyingKey.from_string(s1)
self.assertTruePubkeysEqual(pub1, pub2)
priv1 = SigningKey.generate(curve=NIST256p)
pub1 = priv1.get_verifying_key()
s1 = pub1.to_string()
self.assertEqual(type(s1), binary_type)
self.assertEqual(len(s1), NIST256p.verifying_key_length)
pub2 = VerifyingKey.from_string(s1, curve=NIST256p)
self.assertTruePubkeysEqual(pub1, pub2)
pub1_der = pub1.to_der()
self.assertEqual(type(pub1_der), binary_type)
pub2 = VerifyingKey.from_der(pub1_der)
self.assertTruePubkeysEqual(pub1, pub2)
self.assertRaises(der.UnexpectedDER,
VerifyingKey.from_der, pub1_der+b("junk"))
badpub = VerifyingKey.from_der(pub1_der)
class FakeGenerator:
def order(self): return 123456789
badcurve = Curve("unknown", None, None, FakeGenerator(), (1,2,3,4,5,6))
badpub.curve = badcurve
badder = badpub.to_der()
self.assertRaises(UnknownCurveError, VerifyingKey.from_der, badder)
pem = pub1.to_pem()
self.assertEqual(type(pem), binary_type)
self.assertTrue(pem.startswith(b("-----BEGIN PUBLIC KEY-----")), pem)
self.assertTrue(pem.strip().endswith(b("-----END PUBLIC KEY-----")), pem)
pub2 = VerifyingKey.from_pem(pem)
self.assertTruePubkeysEqual(pub1, pub2)
def test_signature_strings(self):
priv1 = SigningKey.generate()
pub1 = priv1.get_verifying_key()
data = b("data")
sig = priv1.sign(data)
self.assertEqual(type(sig), binary_type)
self.assertEqual(len(sig), NIST192p.signature_length)
self.assertTrue(pub1.verify(sig, data))
sig = priv1.sign(data, sigencode=sigencode_strings)
self.assertEqual(type(sig), tuple)
self.assertEqual(len(sig), 2)
self.assertEqual(type(sig[0]), binary_type)
self.assertEqual(type(sig[1]), binary_type)
self.assertEqual(len(sig[0]), NIST192p.baselen)
self.assertEqual(len(sig[1]), NIST192p.baselen)
self.assertTrue(pub1.verify(sig, data, sigdecode=sigdecode_strings))
sig_der = priv1.sign(data, sigencode=sigencode_der)
self.assertEqual(type(sig_der), binary_type)
self.assertTrue(pub1.verify(sig_der, data, sigdecode=sigdecode_der))
def test_hashfunc(self):
sk = SigningKey.generate(curve=NIST256p, hashfunc=sha256)
data = b("security level is 128 bits")
sig = sk.sign(data)
vk = VerifyingKey.from_string(sk.get_verifying_key().to_string(),
curve=NIST256p, hashfunc=sha256)
self.assertTrue(vk.verify(sig, data))
sk2 = SigningKey.generate(curve=NIST256p)
sig2 = sk2.sign(data, hashfunc=sha256)
vk2 = VerifyingKey.from_string(sk2.get_verifying_key().to_string(),
curve=NIST256p, hashfunc=sha256)
self.assertTrue(vk2.verify(sig2, data))
vk3 = VerifyingKey.from_string(sk.get_verifying_key().to_string(),
curve=NIST256p)
self.assertTrue(vk3.verify(sig, data, hashfunc=sha256))
class OpenSSL(unittest.TestCase):
# test interoperability with OpenSSL tools. Note that openssl's ECDSA
# sign/verify arguments changed between 0.9.8 and 1.0.0: the early
# versions require "-ecdsa-with-SHA1", the later versions want just
# "-SHA1" (or to leave out that argument entirely, which means the
# signature will use some default digest algorithm, probably determined
# by the key, probably always SHA1).
#
# openssl ecparam -name secp224r1 -genkey -out privkey.pem
# openssl ec -in privkey.pem -text -noout # get the priv/pub keys
# openssl dgst -ecdsa-with-SHA1 -sign privkey.pem -out data.sig data.txt
# openssl asn1parse -in data.sig -inform DER
# data.sig is 64 bytes, probably 56b plus ASN1 overhead
# openssl dgst -ecdsa-with-SHA1 -prverify privkey.pem -signature data.sig data.txt ; echo $?
# openssl ec -in privkey.pem -pubout -out pubkey.pem
# openssl ec -in privkey.pem -pubout -outform DER -out pubkey.der
def get_openssl_messagedigest_arg(self):
v = run_openssl("version")
# e.g. "OpenSSL 1.0.0 29 Mar 2010", or "OpenSSL 1.0.0a 1 Jun 2010",
# or "OpenSSL 0.9.8o 01 Jun 2010"
vs = v.split()[1].split(".")
if vs >= ["1","0","0"]:
return "-SHA1"
else:
return "-ecdsa-with-SHA1"
# sk: 1:OpenSSL->python 2:python->OpenSSL
# vk: 3:OpenSSL->python 4:python->OpenSSL
# sig: 5:OpenSSL->python 6:python->OpenSSL
def test_from_openssl_nist192p(self):
return self.do_test_from_openssl(NIST192p)
def test_from_openssl_nist224p(self):
return self.do_test_from_openssl(NIST224p)
def test_from_openssl_nist256p(self):
return self.do_test_from_openssl(NIST256p)
def test_from_openssl_nist384p(self):
return self.do_test_from_openssl(NIST384p)
def test_from_openssl_nist521p(self):
return self.do_test_from_openssl(NIST521p)
def test_from_openssl_secp256k1(self):
return self.do_test_from_openssl(SECP256k1)
def do_test_from_openssl(self, curve):
curvename = curve.openssl_name
assert curvename
# OpenSSL: create sk, vk, sign.
# Python: read vk(3), checksig(5), read sk(1), sign, check
mdarg = self.get_openssl_messagedigest_arg()
if os.path.isdir("t"):
shutil.rmtree("t")
os.mkdir("t")
run_openssl("ecparam -name %s -genkey -out t/privkey.pem" % curvename)
run_openssl("ec -in t/privkey.pem -pubout -out t/pubkey.pem")
data = b("data")
with open("t/data.txt","wb") as e: e.write(data)
run_openssl("dgst %s -sign t/privkey.pem -out t/data.sig t/data.txt" % mdarg)
run_openssl("dgst %s -verify t/pubkey.pem -signature t/data.sig t/data.txt" % mdarg)
with open("t/pubkey.pem","rb") as e: pubkey_pem = e.read()
vk = VerifyingKey.from_pem(pubkey_pem) # 3
with open("t/data.sig","rb") as e: sig_der = e.read()
self.assertTrue(vk.verify(sig_der, data, # 5
hashfunc=sha1, sigdecode=sigdecode_der))
with open("t/privkey.pem") as e: fp = e.read()
sk = SigningKey.from_pem(fp) # 1
sig = sk.sign(data)
self.assertTrue(vk.verify(sig, data))
def test_to_openssl_nist192p(self):
self.do_test_to_openssl(NIST192p)
def test_to_openssl_nist224p(self):
self.do_test_to_openssl(NIST224p)
def test_to_openssl_nist256p(self):
self.do_test_to_openssl(NIST256p)
def test_to_openssl_nist384p(self):
self.do_test_to_openssl(NIST384p)
def test_to_openssl_nist521p(self):
self.do_test_to_openssl(NIST521p)
def test_to_openssl_secp256k1(self):
self.do_test_to_openssl(SECP256k1)
def do_test_to_openssl(self, curve):
curvename = curve.openssl_name
assert curvename
# Python: create sk, vk, sign.
# OpenSSL: read vk(4), checksig(6), read sk(2), sign, check
mdarg = self.get_openssl_messagedigest_arg()
if os.path.isdir("t"):
shutil.rmtree("t")
os.mkdir("t")
sk = SigningKey.generate(curve=curve)
vk = sk.get_verifying_key()
data = b("data")
with open("t/pubkey.der","wb") as e: e.write(vk.to_der()) # 4
with open("t/pubkey.pem","wb") as e: e.write(vk.to_pem()) # 4
sig_der = sk.sign(data, hashfunc=sha1, sigencode=sigencode_der)
with open("t/data.sig","wb") as e: e.write(sig_der) # 6
with open("t/data.txt","wb") as e: e.write(data)
with open("t/baddata.txt","wb") as e: e.write(data+b("corrupt"))
self.assertRaises(SubprocessError, run_openssl,
"dgst %s -verify t/pubkey.der -keyform DER -signature t/data.sig t/baddata.txt" % mdarg)
run_openssl("dgst %s -verify t/pubkey.der -keyform DER -signature t/data.sig t/data.txt" % mdarg)
with open("t/privkey.pem","wb") as e: e.write(sk.to_pem()) # 2
run_openssl("dgst %s -sign t/privkey.pem -out t/data.sig2 t/data.txt" % mdarg)
run_openssl("dgst %s -verify t/pubkey.pem -signature t/data.sig2 t/data.txt" % mdarg)
class DER(unittest.TestCase):
def test_oids(self):
oid_ecPublicKey = der.encode_oid(1, 2, 840, 10045, 2, 1)
self.assertEqual(hexlify(oid_ecPublicKey), b("06072a8648ce3d0201"))
self.assertEqual(hexlify(NIST224p.encoded_oid), b("06052b81040021"))
self.assertEqual(hexlify(NIST256p.encoded_oid),
b("06082a8648ce3d030107"))
x = oid_ecPublicKey + b("more")
x1, rest = der.remove_object(x)
self.assertEqual(x1, (1, 2, 840, 10045, 2, 1))
self.assertEqual(rest, b("more"))
def test_integer(self):
self.assertEqual(der.encode_integer(0), b("\x02\x01\x00"))
self.assertEqual(der.encode_integer(1), b("\x02\x01\x01"))
self.assertEqual(der.encode_integer(127), b("\x02\x01\x7f"))
self.assertEqual(der.encode_integer(128), b("\x02\x02\x00\x80"))
self.assertEqual(der.encode_integer(256), b("\x02\x02\x01\x00"))
#self.assertEqual(der.encode_integer(-1), b("\x02\x01\xff"))
def s(n): return der.remove_integer(der.encode_integer(n) + b("junk"))
self.assertEqual(s(0), (0, b("junk")))
self.assertEqual(s(1), (1, b("junk")))
self.assertEqual(s(127), (127, b("junk")))
self.assertEqual(s(128), (128, b("junk")))
self.assertEqual(s(256), (256, b("junk")))
self.assertEqual(s(1234567890123456789012345678901234567890),
(1234567890123456789012345678901234567890,b("junk")))
def test_number(self):
self.assertEqual(der.encode_number(0), b("\x00"))
self.assertEqual(der.encode_number(127), b("\x7f"))
self.assertEqual(der.encode_number(128), b("\x81\x00"))
self.assertEqual(der.encode_number(3*128+7), b("\x83\x07"))
#self.assertEqual(der.read_number("\x81\x9b"+"more"), (155, 2))
#self.assertEqual(der.encode_number(155), b("\x81\x9b"))
for n in (0, 1, 2, 127, 128, 3*128+7, 840, 10045): #, 155):
x = der.encode_number(n) + b("more")
n1, llen = der.read_number(x)
self.assertEqual(n1, n)
self.assertEqual(x[llen:], b("more"))
def test_length(self):
self.assertEqual(der.encode_length(0), b("\x00"))
self.assertEqual(der.encode_length(127), b("\x7f"))
self.assertEqual(der.encode_length(128), b("\x81\x80"))
self.assertEqual(der.encode_length(255), b("\x81\xff"))
self.assertEqual(der.encode_length(256), b("\x82\x01\x00"))
self.assertEqual(der.encode_length(3*256+7), b("\x82\x03\x07"))
self.assertEqual(der.read_length(b("\x81\x9b")+b("more")), (155, 2))
self.assertEqual(der.encode_length(155), b("\x81\x9b"))
for n in (0, 1, 2, 127, 128, 255, 256, 3*256+7, 155):
x = der.encode_length(n) + b("more")
n1, llen = der.read_length(x)
self.assertEqual(n1, n)
self.assertEqual(x[llen:], b("more"))
def test_sequence(self):
x = der.encode_sequence(b("ABC"), b("DEF")) + b("GHI")
self.assertEqual(x, b("\x30\x06ABCDEFGHI"))
x1, rest = der.remove_sequence(x)
self.assertEqual(x1, b("ABCDEF"))
self.assertEqual(rest, b("GHI"))
def test_constructed(self):
x = der.encode_constructed(0, NIST224p.encoded_oid)
self.assertEqual(hexlify(x), b("a007") + b("06052b81040021"))
x = der.encode_constructed(1, unhexlify(b("0102030a0b0c")))
self.assertEqual(hexlify(x), b("a106") + b("0102030a0b0c"))
class Util(unittest.TestCase):
def test_trytryagain(self):
tta = util.randrange_from_seed__trytryagain
for i in range(1000):
seed = "seed-%d" % i
for order in (2**8-2, 2**8-1, 2**8, 2**8+1, 2**8+2,
2**16-1, 2**16+1):
n = tta(seed, order)
self.assertTrue(1 <= n < order, (1, n, order))
# this trytryagain *does* provide long-term stability
self.assertEqual(("%x"%(tta("seed", NIST224p.order))).encode(),
b("6fa59d73bf0446ae8743cf748fc5ac11d5585a90356417e97155c3bc"))
def test_randrange(self):
# util.randrange does not provide long-term stability: we might
# change the algorithm in the future.
for i in range(1000):
entropy = util.PRNG("seed-%d" % i)
for order in (2**8-2, 2**8-1, 2**8,
2**16-1, 2**16+1,
):
# that oddball 2**16+1 takes half our runtime
n = util.randrange(order, entropy=entropy)
self.assertTrue(1 <= n < order, (1, n, order))
def OFF_test_prove_uniformity(self):
order = 2**8-2
counts = dict([(i, 0) for i in range(1, order)])
assert 0 not in counts
assert order not in counts
for i in range(1000000):
seed = "seed-%d" % i
n = util.randrange_from_seed__trytryagain(seed, order)
counts[n] += 1
# this technique should use the full range
self.assertTrue(counts[order-1])
for i in range(1, order):
print_("%3d: %s" % (i, "*"*(counts[i]//100)))
class RFC6979(unittest.TestCase):
# https://tools.ietf.org/html/rfc6979#appendix-A.1
def _do(self, generator, secexp, hsh, hash_func, expected):
actual = rfc6979.generate_k(generator.order(), secexp, hash_func, hsh)
self.assertEqual(expected, actual)
def test_SECP256k1(self):
'''The RFC doesn't contain test vectors for SECP256k1, the curve used in Bitcoin.
This vector has been computed with the Go reference implementation instead.'''
self._do(
generator = SECP256k1.generator,
secexp = int("9d0219792467d7d37b4d43298a7d0c05", 16),
hsh = sha256(b("sample")).digest(),
hash_func = sha256,
expected = int("8fa1f95d514760e498f28957b824ee6ec39ed64826ff4fecc2b5739ec45b91cd", 16))
def test_SECP256k1_2(self):
self._do(
generator=SECP256k1.generator,
secexp=int("cca9fbcc1b41e5a95d369eaa6ddcff73b61a4efaa279cfc6567e8daa39cbaf50", 16),
hsh=sha256(b("sample")).digest(),
hash_func=sha256,
expected=int("2df40ca70e639d89528a6b670d9d48d9165fdc0febc0974056bdce192b8e16a3", 16))
def test_SECP256k1_3(self):
self._do(
generator=SECP256k1.generator,
secexp=0x1,
hsh=sha256(b("Satoshi Nakamoto")).digest(),
hash_func=sha256,
expected=0x8F8A276C19F4149656B280621E358CCE24F5F52542772691EE69063B74F15D15)
def test_SECP256k1_4(self):
self._do(
generator=SECP256k1.generator,
secexp=0x1,
hsh=sha256(b("All those moments will be lost in time, like tears in rain. Time to die...")).digest(),
hash_func=sha256,
expected=0x38AA22D72376B4DBC472E06C3BA403EE0A394DA63FC58D88686C611ABA98D6B3)
def test_SECP256k1_5(self):
self._do(
generator=SECP256k1.generator,
secexp=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364140,
hsh=sha256(b("Satoshi Nakamoto")).digest(),
hash_func=sha256,
expected=0x33A19B60E25FB6F4435AF53A3D42D493644827367E6453928554F43E49AA6F90)
def test_SECP256k1_6(self):
self._do(
generator=SECP256k1.generator,
secexp=0xf8b8af8ce3c7cca5e300d33939540c10d45ce001b8f252bfbc57ba0342904181,
hsh=sha256(b("Alan Turing")).digest(),
hash_func=sha256,
expected=0x525A82B70E67874398067543FD84C83D30C175FDC45FDEEE082FE13B1D7CFDF1)
def test_1(self):
# Basic example from the RFC; it also tests 'try-try-again' from Step H of RFC 6979
self._do(
generator = Point(None, 0, 0, int("4000000000000000000020108A2E0CC0D99F8A5EF", 16)),
secexp = int("09A4D6792295A7F730FC3F2B49CBC0F62E862272F", 16),
hsh = unhexlify(b("AF2BDBE1AA9B6EC1E2ADE1D694F41FC71A831D0268E9891562113D8A62ADD1BF")),
hash_func = sha256,
expected = int("23AF4074C90A02B3FE61D286D5C87F425E6BDD81B", 16))
def test_2(self):
self._do(
generator=NIST192p.generator,
secexp = int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
hsh = sha1(b("sample")).digest(),
hash_func = sha1,
expected = int("37D7CA00D2C7B0E5E412AC03BD44BA837FDD5B28CD3B0021", 16))
def test_3(self):
self._do(
generator=NIST192p.generator,
secexp = int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
hsh = sha256(b("sample")).digest(),
hash_func = sha256,
expected = int("32B1B6D7D42A05CB449065727A84804FB1A3E34D8F261496", 16))
def test_4(self):
self._do(
generator=NIST192p.generator,
secexp = int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
hsh = sha512(b("sample")).digest(),
hash_func = sha512,
expected = int("A2AC7AB055E4F20692D49209544C203A7D1F2C0BFBC75DB1", 16))
def test_5(self):
self._do(
generator=NIST192p.generator,
secexp = int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
hsh = sha1(b("test")).digest(),
hash_func = sha1,
expected = int("D9CF9C3D3297D3260773A1DA7418DB5537AB8DD93DE7FA25", 16))
def test_6(self):
self._do(
generator=NIST192p.generator,
secexp = int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
hsh = sha256(b("test")).digest(),
hash_func = sha256,
expected = int("5C4CE89CF56D9E7C77C8585339B006B97B5F0680B4306C6C", 16))
def test_7(self):
self._do(
generator=NIST192p.generator,
secexp = int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
hsh = sha512(b("test")).digest(),
hash_func = sha512,
expected = int("0758753A5254759C7CFBAD2E2D9B0792EEE44136C9480527", 16))
def test_8(self):
self._do(
generator=NIST521p.generator,
secexp = int("0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538", 16),
hsh = sha1(b("sample")).digest(),
hash_func = sha1,
expected = int("089C071B419E1C2820962321787258469511958E80582E95D8378E0C2CCDB3CB42BEDE42F50E3FA3C71F5A76724281D31D9C89F0F91FC1BE4918DB1C03A5838D0F9", 16))
def test_9(self):
self._do(
generator=NIST521p.generator,
secexp = int("0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538", 16),
hsh = sha256(b("sample")).digest(),
hash_func = sha256,
expected = int("0EDF38AFCAAECAB4383358B34D67C9F2216C8382AAEA44A3DAD5FDC9C32575761793FEF24EB0FC276DFC4F6E3EC476752F043CF01415387470BCBD8678ED2C7E1A0", 16))
def test_10(self):
self._do(
generator=NIST521p.generator,
secexp = int("0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538", 16),
hsh = sha512(b("test")).digest(),
hash_func = sha512,
expected = int("16200813020EC986863BEDFC1B121F605C1215645018AEA1A7B215A564DE9EB1B38A67AA1128B80CE391C4FB71187654AAA3431027BFC7F395766CA988C964DC56D", 16))
def __main__():
unittest.main()
if __name__ == "__main__":
__main__()
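# To run this suite standalone (a sketch: the vendored directory must be
# importable as the 'ecdsa' package, and the OpenSSL tests shell out to an
# `openssl` binary on PATH):
#
#   python -m ecdsa.test_pyecdsa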

247
bin/python/ecdsa/util.py Normal file
View File

@@ -0,0 +1,247 @@
from __future__ import division
import os
import math
import binascii
from hashlib import sha256
from . import der
from .curves import orderlen
from .six import PY3, int2byte, b, next
# RFC5480:
# The "unrestricted" algorithm identifier is:
# id-ecPublicKey OBJECT IDENTIFIER ::= {
# iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
oid_ecPublicKey = (1, 2, 840, 10045, 2, 1)
encoded_oid_ecPublicKey = der.encode_oid(*oid_ecPublicKey)
def randrange(order, entropy=None):
"""Return a random integer k such that 1 <= k < order, uniformly
distributed across that range. For simplicity, this only behaves well if
'order' is fairly close (but below) a power of 256. The try-try-again
algorithm we use takes longer and longer time (on average) to complete as
'order' falls, rising to a maximum of avg=512 loops for the worst-case
(256**k)+1 . All of the standard curves behave well. There is a cutoff at
10k loops (which raises RuntimeError) to prevent an infinite loop when
something is really broken like the entropy function not working.
Note that this function is not declared to be forwards-compatible: we may
change the behavior in future releases. The entropy= argument (which
should get a callable that behaves like os.urandom) can be used to
achieve stability within a given release (for repeatable unit tests), but
should not be used as a long-term-compatible key generation algorithm.
"""
# we could handle arbitrary orders (even 256**k+1) better if we created
# candidates bit-wise instead of byte-wise, which would reduce the
# worst-case behavior to avg=2 loops, but that would be more complex. The
# change would be to round the order up to a power of 256, subtract one
# (to get 0xffff..), use that to get a byte-long mask for the top byte,
# generate the len-1 entropy bytes, generate one extra byte and mask off
# the top bits, then combine it with the rest. Requires jumping back and
# forth between strings and integers a lot.
if entropy is None:
entropy = os.urandom
assert order > 1
bytes = orderlen(order)
dont_try_forever = 10000 # gives about 2**-60 failures for worst case
while dont_try_forever > 0:
dont_try_forever -= 1
candidate = string_to_number(entropy(bytes)) + 1
if 1 <= candidate < order:
return candidate
continue
raise RuntimeError("randrange() tried hard but gave up, either something"
" is very wrong or you got realllly unlucky. Order was"
" %x" % order)
class PRNG:
# this returns a callable which, when invoked with an integer N, will
# return N pseudorandom bytes. Note: this is a short-term PRNG, meant
# primarily for the needs of randrange_from_seed__trytryagain(), which
# only needs to run it a few times per seed. It does not provide
# protection against state compromise (forward security).
def __init__(self, seed):
self.generator = self.block_generator(seed)
def __call__(self, numbytes):
a = [next(self.generator) for i in range(numbytes)]
if PY3:
return bytes(a)
else:
return "".join(a)
def block_generator(self, seed):
counter = 0
while True:
for byte in sha256(("prng-%d-%s" % (counter, seed)).encode()).digest():
yield byte
counter += 1
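# PRNG is deterministic by construction: the same seed always yields the
# same byte stream, which is what makes seeded unit tests repeatable.
assert PRNG("demo-seed")(16) == PRNG("demo-seed")(16)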
def randrange_from_seed__overshoot_modulo(seed, order):
# hash the data, then turn the digest into a number in [1,order).
#
# We use David-Sarah Hopwood's suggestion: turn it into a number that's
# sufficiently larger than the group order, then modulo it down to fit.
# This should give adequate (but not perfect) uniformity, and simple
# code. There are other choices: try-try-again is the main one.
base = PRNG(seed)(2*orderlen(order))
number = (int(binascii.hexlify(base), 16) % (order-1)) + 1
assert 1 <= number < order, (1, number, order)
return number
def lsb_of_ones(numbits):
return (1 << numbits) - 1
def bits_and_bytes(order):
bits = int(math.log(order-1, 2)+1)
bytes = bits // 8
extrabits = bits % 8
return bits, bytes, extrabits
# the following randrange_from_seed__METHOD() functions take an
# arbitrarily-sized secret seed and turn it into a number that obeys the same
# range limits as randrange() above. They are meant for deriving consistent
# signing keys from a secret rather than generating them randomly, for
# example a protocol in which three signing keys are derived from a master
# secret. You should use a uniformly-distributed unguessable seed with about
# curve.baselen bytes of entropy. To use one, do this:
# seed = os.urandom(curve.baselen) # or other starting point
# secexp = ecdsa.util.randrange_from_seed__trytryagain(seed, curve.order)
# sk = SigningKey.from_secret_exponent(secexp, curve)
def randrange_from_seed__truncate_bytes(seed, order, hashmod=sha256):
# hash the seed, then turn the digest into a number in [1,order), but
# don't worry about trying to uniformly fill the range. This will lose,
# on average, four bits of entropy.
bits, bytes, extrabits = bits_and_bytes(order)
if extrabits:
bytes += 1
base = hashmod(seed).digest()[:bytes]
base = "\x00"*(bytes-len(base)) + base
number = 1+int(binascii.hexlify(base), 16)
assert 1 <= number < order
return number
def randrange_from_seed__truncate_bits(seed, order, hashmod=sha256):
# like randrange_from_seed__truncate_bytes, but only lose an average of
# half a bit
bits = int(math.log(order-1, 2)+1)
maxbytes = (bits+7) // 8
base = hashmod(seed).digest()[:maxbytes]
base = "\x00"*(maxbytes-len(base)) + base
topbits = 8*maxbytes - bits
if topbits:
base = int2byte(ord(base[0]) & lsb_of_ones(topbits)) + base[1:]
number = 1+int(binascii.hexlify(base), 16)
assert 1 <= number < order
return number
def randrange_from_seed__trytryagain(seed, order):
# figure out exactly how many bits we need (rounded up to the nearest
# bit), so we can reduce the chance of looping to less than 0.5. This is
# specified to feed from a byte-oriented PRNG, and discards the
# high-order bits of the first byte as necessary to get the right number
# of bits. The average number of loops will range from 1.0 (when
# order=2**k-1) to 2.0 (when order=2**k+1).
assert order > 1
bits, bytes, extrabits = bits_and_bytes(order)
generate = PRNG(seed)
while True:
extrabyte = b("")
if extrabits:
extrabyte = int2byte(ord(generate(1)) & lsb_of_ones(extrabits))
guess = string_to_number(extrabyte + generate(bytes)) + 1
if 1 <= guess < order:
return guess
def number_to_string(num, order):
l = orderlen(order)
fmt_str = "%0" + str(2*l) + "x"
string = binascii.unhexlify((fmt_str % num).encode())
assert len(string) == l, (len(string), l)
return string
def number_to_string_crop(num, order):
l = orderlen(order)
fmt_str = "%0" + str(2*l) + "x"
string = binascii.unhexlify((fmt_str % num).encode())
return string[:l]
def string_to_number(string):
return int(binascii.hexlify(string), 16)
def string_to_number_fixedlen(string, order):
l = orderlen(order)
assert len(string) == l, (len(string), l)
return int(binascii.hexlify(string), 16)
# these methods are useful for the sigencode= argument to SK.sign() and the
# sigdecode= argument to VK.verify(), and control how the signature is packed
# or unpacked.
def sigencode_strings(r, s, order):
r_str = number_to_string(r, order)
s_str = number_to_string(s, order)
return (r_str, s_str)
def sigencode_string(r, s, order):
# for any given curve, the size of the signature numbers is
# fixed, so just use simple concatenation
r_str, s_str = sigencode_strings(r, s, order)
return r_str + s_str
def sigencode_der(r, s, order):
return der.encode_sequence(der.encode_integer(r), der.encode_integer(s))
# canonical versions of sigencode methods
# these enforce low S values by negating s (modulo the order) when it is above order/2
# see CECKey::Sign() https://github.com/bitcoin/bitcoin/blob/master/src/key.cpp#L214
def sigencode_strings_canonize(r, s, order):
if s > order / 2:
s = order - s
return sigencode_strings(r, s, order)
def sigencode_string_canonize(r, s, order):
if s > order / 2:
s = order - s
return sigencode_string(r, s, order)
def sigencode_der_canonize(r, s, order):
if s > order / 2:
s = order - s
return sigencode_der(r, s, order)
def sigdecode_string(signature, order):
l = orderlen(order)
assert len(signature) == 2*l, (len(signature), 2*l)
r = string_to_number_fixedlen(signature[:l], order)
s = string_to_number_fixedlen(signature[l:], order)
return r, s
def sigdecode_strings(rs_strings, order):
(r_str, s_str) = rs_strings
l = orderlen(order)
assert len(r_str) == l, (len(r_str), l)
assert len(s_str) == l, (len(s_str), l)
r = string_to_number_fixedlen(r_str, order)
s = string_to_number_fixedlen(s_str, order)
return r, s
def sigdecode_der(sig_der, order):
#return der.encode_sequence(der.encode_integer(r), der.encode_integer(s))
rs_strings, empty = der.remove_sequence(sig_der)
if empty != b(""):
raise der.UnexpectedDER("trailing junk after DER sig: %s" %
binascii.hexlify(empty))
r, rest = der.remove_integer(rs_strings)
s, empty = der.remove_integer(rest)
if empty != b(""):
raise der.UnexpectedDER("trailing junk after DER numbers: %s" %
binascii.hexlify(empty))
return r, s
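# Self-checks for the helpers above (a sketch; the constant is the
# secp256k1 group order, but any standard-curve order works the same way):
_order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
_r, _s = 12345, _order - 5
assert sigdecode_der(sigencode_der(_r, _s, _order), _order) == (_r, _s)
assert sigdecode_string(sigencode_string(_r, _s, _order), _order) == (_r, _s)
# The canonize variant flips the high _s to _order - _s == 5 before encoding:
assert sigencode_string_canonize(_r, _s, _order) == sigencode_string(_r, 5, _order)
assert 1 <= randrange(2**16 + 1) < 2**16 + 1   # the oddball order from the tests
del _order, _r, _s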

105
bin/python/ed25519.py Normal file
View File

@@ -0,0 +1,105 @@
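# Reference (slow) implementation of Ed25519. It relies on Python 2
# semantics throughout: integer '/' is floor division, and bytes are
# handled as str via chr()/ord(), so it is not Python 3 compatible.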
import hashlib
b = 256
q = 2**255 - 19
l = 2**252 + 27742317777372353535851937790883648493
def H(m):
return hashlib.sha512(m).digest()
def expmod(b,e,m):
if e == 0: return 1
t = expmod(b,e/2,m)**2 % m
if e & 1: t = (t*b) % m
return t
def inv(x):
return expmod(x,q-2,q)
d = -121665 * inv(121666)
I = expmod(2,(q-1)//4,q)
def xrecover(y):
xx = (y*y-1) * inv(d*y*y+1)
x = expmod(xx,(q+3)//8,q)
if (x*x - xx) % q != 0: x = (x*I) % q
if x % 2 != 0: x = q-x
return x
By = 4 * inv(5)
Bx = xrecover(By)
B = [Bx % q,By % q]
def edwards(P,Q):
x1 = P[0]
y1 = P[1]
x2 = Q[0]
y2 = Q[1]
x3 = (x1*y2+x2*y1) * inv(1+d*x1*x2*y1*y2)
y3 = (y1*y2+x1*x2) * inv(1-d*x1*x2*y1*y2)
return [x3 % q,y3 % q]
def scalarmult(P,e):
if e == 0: return [0,1]
Q = scalarmult(P,e//2)
Q = edwards(Q,Q)
if e & 1: Q = edwards(Q,P)
return Q
def encodeint(y):
bits = [(y >> i) & 1 for i in range(b)]
return ''.join([chr(sum([bits[i * 8 + j] << j for j in range(8)])) for i in range(b//8)])
def encodepoint(P):
x = P[0]
y = P[1]
bits = [(y >> i) & 1 for i in range(b - 1)] + [x & 1]
return ''.join([chr(sum([bits[i * 8 + j] << j for j in range(8)])) for i in range(b//8)])
def bit(h,i):
return (ord(h[i//8]) >> (i%8)) & 1
def publickey(sk):
h = H(sk)
a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
A = scalarmult(B,a)
return encodepoint(A)
def Hint(m):
h = H(m)
return sum(2**i * bit(h,i) for i in range(2*b))
def signature(m,sk,pk):
h = H(sk)
a = 2**(b-2) + sum(2**i * bit(h,i) for i in range(3,b-2))
r = Hint(''.join([h[i] for i in range(b//8,b//4)]) + m)
R = scalarmult(B,r)
S = (r + Hint(encodepoint(R) + pk + m) * a) % l
return encodepoint(R) + encodeint(S)
def isoncurve(P):
x = P[0]
y = P[1]
return (-x*x + y*y - 1 - d*x*x*y*y) % q == 0
def decodeint(s):
return sum(2**i * bit(s,i) for i in range(0,b))
def decodepoint(s):
y = sum(2**i * bit(s,i) for i in range(0,b-1))
x = xrecover(y)
if x & 1 != bit(s,b-1): x = q-x
P = [x,y]
if not isoncurve(P): raise Exception("decoding point that is not on curve")
return P
def checkvalid(s,m,pk):
if len(s) != b//4: raise Exception("signature length is wrong")
if len(pk) != b//8: raise Exception("public-key length is wrong")
R = decodepoint(s[0:b//8])
A = decodepoint(pk)
S = decodeint(s[b//8:b//4])
h = Hint(encodepoint(R) + pk + m)
if scalarmult(B,S) != edwards(R,scalarmult(A,h)):
raise Exception("signature does not pass verification")

View File

@@ -0,0 +1,4 @@
from .jsonpath import *
from .parser import parse
__version__ = '1.3.0'
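# Hedged usage sketch:
#   from jsonpath_rw import parse
#   [m.value for m in parse('foo[*].bar').find({'foo': [{'bar': 1}, {'bar': 2}]})]
#   # -> [1, 2]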

View File

@@ -0,0 +1,510 @@
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
import logging
import six
from six.moves import xrange
from itertools import *
logger = logging.getLogger(__name__)
# Turn on/off the automatic creation of id attributes
# ... could be a kwarg pervasively but uses are rare and simple today
auto_id_field = None
class JSONPath(object):
"""
The base class for JSONPath abstract syntax; those
methods stubbed here are the interface to supported
JSONPath semantics.
"""
def find(self, data):
"""
All `JSONPath` types support `find()`, which returns an iterable of `DatumInContext`s.
They keep track of the path followed to the current location, so if the calling code
has some opinion about that, it can be passed in here as a starting point.
"""
raise NotImplementedError()
def update(self, data, val):
"Returns `data` with the specified path replaced by `val`"
raise NotImplementedError()
def child(self, child):
"""
Equivalent to Child(self, next) but with some canonicalization
"""
if isinstance(self, This) or isinstance(self, Root):
return child
elif isinstance(child, This):
return self
elif isinstance(child, Root):
return child
else:
return Child(self, child)
def make_datum(self, value):
if isinstance(value, DatumInContext):
return value
else:
return DatumInContext(value, path=Root(), context=None)
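# Example of the child() canonicalization above:
#   Root().child(Fields('a'))  ->  Fields('a')   (left Root is dropped)
#   Fields('a').child(This())  ->  Fields('a')   (right This is dropped)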
class DatumInContext(object):
"""
Represents a datum along a path from a context.
Essentially a zipper but with a structure represented by JSONPath,
and where the context is more of a parent pointer than a proper
representation of the context.
For quick-and-dirty work, this proxies any non-special attributes
to the underlying datum, but the actual datum can (and usually should)
be retrieved via the `value` attribute.
To place `datum` within another, use `datum.in_context(context=..., path=...)`
which extends the path. If the datum already has a context, it places the entire
context within that passed in, so an object can be built from the inside
out.
"""
@classmethod
def wrap(cls, data):
if isinstance(data, cls):
return data
else:
return cls(data)
def __init__(self, value, path=None, context=None):
self.value = value
self.path = path or This()
self.context = None if context is None else DatumInContext.wrap(context)
def in_context(self, context, path):
context = DatumInContext.wrap(context)
if self.context:
return DatumInContext(value=self.value, path=self.path, context=context.in_context(path=path, context=context))
else:
return DatumInContext(value=self.value, path=path, context=context)
@property
def full_path(self):
return self.path if self.context is None else self.context.full_path.child(self.path)
@property
def id_pseudopath(self):
"""
Looks like a path, but with ids stuck in when available
"""
try:
pseudopath = Fields(str(self.value[auto_id_field]))
except (TypeError, AttributeError, KeyError): # This may not be all the interesting exceptions
pseudopath = self.path
if self.context:
return self.context.id_pseudopath.child(pseudopath)
else:
return pseudopath
def __repr__(self):
return '%s(value=%r, path=%r, context=%r)' % (self.__class__.__name__, self.value, self.path, self.context)
def __eq__(self, other):
return isinstance(other, DatumInContext) and other.value == self.value and other.path == self.path and self.context == other.context
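# Example: for d = DatumInContext(1, path=Fields('a'), context={'a': 1}),
# d.full_path is Fields('a'): the wrapped context defaults to path This(),
# which child() canonicalizes away.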
class AutoIdForDatum(DatumInContext):
"""
This behaves like a DatumInContext, but the value is
always the path leading up to it, not including the "id",
and with any "id" fields along the way replacing the prior
segment of the path
For example, it will make "foo.bar.id" return a datum
that behaves like DatumInContext(value="foo.bar", path="foo.bar.id").
This is disabled by default; it can be turned on by
setting the `auto_id_field` global to a value other
than `None`.
"""
def __init__(self, datum, id_field=None):
"""
Invariant is that datum.path is the path from context to datum. The auto id
will either be the id in the datum (if present) or the id of the context
followed by the path to the datum.
The path to this datum is always the path to the context, the path to the
datum, and then the auto id field.
"""
self.datum = datum
self.id_field = id_field or auto_id_field
@property
def value(self):
return str(self.datum.id_pseudopath)
@property
def path(self):
return self.id_field
@property
def context(self):
return self.datum
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.datum)
def in_context(self, context, path):
return AutoIdForDatum(self.datum.in_context(context=context, path=path))
def __eq__(self, other):
return isinstance(other, AutoIdForDatum) and other.datum == self.datum and self.id_field == other.id_field
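# Example (hedged; enabled by setting the module global):
#   auto_id_field = 'id'
#   [m.value for m in parse('foo.id').find({'foo': {'bar': 1}})]  ->  ['foo']
#   # if the data were {'foo': {'id': 'x'}}, the value would be 'x' instead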
class Root(JSONPath):
"""
The JSONPath referring to the "root" object. Concrete syntax is '$'.
The root is the topmost datum without any context attached.
"""
def find(self, data):
if not isinstance(data, DatumInContext):
return [DatumInContext(data, path=Root(), context=None)]
else:
if data.context is None:
return [DatumInContext(data.value, context=None, path=Root())]
else:
return Root().find(data.context)
def update(self, data, val):
return val
def __str__(self):
return '$'
def __repr__(self):
return 'Root()'
def __eq__(self, other):
return isinstance(other, Root)
class This(JSONPath):
"""
The JSONPath referring to the current datum. Concrete syntax is '@'.
"""
def find(self, datum):
return [DatumInContext.wrap(datum)]
def update(self, data, val):
return val
def __str__(self):
return '`this`'
def __repr__(self):
return 'This()'
def __eq__(self, other):
return isinstance(other, This)
class Child(JSONPath):
"""
JSONPath that first matches the left, then the right.
Concrete syntax is <left> '.' <right>
"""
def __init__(self, left, right):
self.left = left
self.right = right
def find(self, datum):
"""
Extra special case: auto ids do not have children,
so cut it off right now rather than auto id the auto id
"""
return [submatch
for subdata in self.left.find(datum)
if not isinstance(subdata, AutoIdForDatum)
for submatch in self.right.find(subdata)]
def __eq__(self, other):
return isinstance(other, Child) and self.left == other.left and self.right == other.right
def __str__(self):
return '%s.%s' % (self.left, self.right)
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.left, self.right)
class Parent(JSONPath):
"""
JSONPath that matches the parent node of the current match.
Will crash if no such parent exists.
Available via named operator `parent`.
"""
def find(self, datum):
datum = DatumInContext.wrap(datum)
return [datum.context]
def __eq__(self, other):
return isinstance(other, Parent)
def __str__(self):
return '`parent`'
def __repr__(self):
return 'Parent()'
class Where(JSONPath):
"""
JSONPath that first matches the left, and then
filters for only those nodes that have
a match on the right.
WARNING: Subject to change. May want to have "contains"
or some other better word for it.
"""
def __init__(self, left, right):
self.left = left
self.right = right
def find(self, data):
return [subdata for subdata in self.left.find(data) if self.right.find(subdata)]
def __str__(self):
return '%s where %s' % (self.left, self.right)
def __eq__(self, other):
return isinstance(other, Where) and other.left == self.left and other.right == self.right
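# Example: parse('foo where bar').find({'foo': {'bar': 1}}) keeps the match
# for 'foo' because a 'bar' child exists; with {'foo': {}} it yields nothing.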
class Descendants(JSONPath):
"""
JSONPath that matches first the left expression then any descendant
of it which matches the right expression.
"""
def __init__(self, left, right):
self.left = left
self.right = right
def find(self, datum):
# <left> .. <right> ==> <left> . (<right> | *..<right> | [*]..<right>)
#
# With the wonky caveat that since Slice() has funky coercions
# we cannot just delegate to that equivalence or we'll hit an
# infinite loop. So right here we implement the coercion-free version.
# Get all left matches into a list
left_matches = self.left.find(datum)
if not isinstance(left_matches, list):
left_matches = [left_matches]
def match_recursively(datum):
right_matches = self.right.find(datum)
# Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern
if isinstance(datum.value, list):
recursive_matches = [submatch
for i in range(0, len(datum.value))
for submatch in match_recursively(DatumInContext(datum.value[i], context=datum, path=Index(i)))]
elif isinstance(datum.value, dict):
recursive_matches = [submatch
for field in datum.value.keys()
for submatch in match_recursively(DatumInContext(datum.value[field], context=datum, path=Fields(field)))]
else:
recursive_matches = []
return right_matches + list(recursive_matches)
# TODO: repeatable iterator instead of list?
return [submatch
for left_match in left_matches
for submatch in match_recursively(left_match)]
def is_singular(self):
return False
def __str__(self):
return '%s..%s' % (self.left, self.right)
def __eq__(self, other):
return isinstance(other, Descendants) and self.left == other.left and self.right == other.right
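# Example: parse('$..bar').find({'a': {'bar': 1}, 'b': [{'bar': 2}]}) matches
# both nested occurrences of 'bar', yielding the values 1 and 2.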
class Union(JSONPath):
"""
JSONPath that returns the union of the results of each match.
This is pretty shoddily implemented for now. The nicest semantics
in case of mismatched bits (list vs atomic) is to put
them all in a list, but I haven't done that yet.
WARNING: Any appearance of this being the _concatenation_ is
coincidence. It may even be a bug! (or laziness)
"""
def __init__(self, left, right):
self.left = left
self.right = right
def is_singular(self):
return False
def find(self, data):
return self.left.find(data) + self.right.find(data)
class Intersect(JSONPath):
"""
JSONPath for bits that match *both* patterns.
This can be accomplished a couple of ways. The most
efficient is to actually build the intersected
AST as in building a state machine for matching the
intersection of regular languages. The next
idea is to build a filtered data and match against
that.
"""
def __init__(self, left, right):
self.left = left
self.right = right
def is_singular(self):
return False
def find(self, data):
raise NotImplementedError()
class Fields(JSONPath):
"""
JSONPath referring to some field of the current object.
Concrete syntax is comma-separated field names.
WARNING: If '*' is any of the field names, then they will
all be returned.
"""
def __init__(self, *fields):
self.fields = fields
def get_field_datum(self, datum, field):
if field == auto_id_field:
return AutoIdForDatum(datum)
else:
try:
field_value = datum.value[field] # Do NOT use `val.get(field)` since that confuses None as a value and None due to `get`
return DatumInContext(value=field_value, path=Fields(field), context=datum)
except (TypeError, KeyError, AttributeError):
return None
def reified_fields(self, datum):
if '*' not in self.fields:
return self.fields
else:
try:
fields = tuple(datum.value.keys())
return fields if auto_id_field is None else fields + (auto_id_field,)
except AttributeError:
return ()
def find(self, datum):
datum = DatumInContext.wrap(datum)
return [field_datum
for field_datum in [self.get_field_datum(datum, field) for field in self.reified_fields(datum)]
if field_datum is not None]
def __str__(self):
return ','.join(self.fields)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ','.join(map(repr, self.fields)))
def __eq__(self, other):
return isinstance(other, Fields) and tuple(self.fields) == tuple(other.fields)
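# Example: parse('a,b').find({'a': 1, 'b': 2, 'c': 3}) yields the values 1
# and 2; parse('*') on the same data yields all three (plus the auto id,
# if enabled).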
class Index(JSONPath):
"""
JSONPath that matches indices of the current datum, or none if not large enough.
Concrete syntax is brackets.
WARNING: If the datum is not long enough, it will not crash but will not match anything.
NOTE: For the concrete syntax of `[*]`, the abstract syntax is a Slice() with no parameters (equivalent to `[:]`).
"""
def __init__(self, index):
self.index = index
def find(self, datum):
datum = DatumInContext.wrap(datum)
if len(datum.value) > self.index:
return [DatumInContext(datum.value[self.index], path=self, context=datum)]
else:
return []
def __eq__(self, other):
return isinstance(other, Index) and self.index == other.index
def __str__(self):
return '[%i]' % self.index
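# Example: parse('[0]').find([10, 20]) yields the value 10; parse('[5]') on
# the same list matches nothing (it returns [] rather than raising).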
class Slice(JSONPath):
"""
JSONPath matching a slice of an array.
Because of a mismatch between JSON and XML when schema-unaware,
this always returns an iterable; if the incoming data
was not a list, then it returns a one element list _containing_ that
data.
Consider these two docs, and their schema-unaware translation to JSON:
<a><b>hello</b></a> ==> {"a": {"b": "hello"}}
<a><b>hello</b><b>goodbye</b></a> ==> {"a": {"b": ["hello", "goodbye"]}}
If there were a schema, it would be known that "b" should always be an
array (unless the schema were wonky, but that is too much to fix here)
so when querying with JSON if the one writing the JSON knows that it
should be an array, they can write a slice operator and it will coerce
a non-array value to an array.
This may be a bit unfortunate because it would be nice to always have
an iterator, but dictionaries and other objects may also be iterable,
so this is the compromise.
"""
def __init__(self, start=None, end=None, step=None):
self.start = start
self.end = end
self.step = step
def find(self, datum):
datum = DatumInContext.wrap(datum)
# Here's the hack. If it is a dictionary or some kind of constant,
# put it in a single-element list
if (isinstance(datum.value, dict) or isinstance(datum.value, six.integer_types) or isinstance(datum.value, six.string_types)):
return self.find(DatumInContext([datum.value], path=datum.path, context=datum.context))
# Some iterators do not support slicing but we can still
# at least work for '*'
if self.start is None and self.end is None and self.step is None:
return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in xrange(0, len(datum.value))]
else:
return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in range(0, len(datum.value))[self.start:self.end:self.step]]
def __str__(self):
if self.start is None and self.end is None and self.step is None:
return '[*]'
else:
return '[%s%s%s]' % (self.start or '',
':%d'%self.end if self.end else '',
':%d'%self.step if self.step else '')
def __repr__(self):
return '%s(start=%r,end=%r,step=%r)' % (self.__class__.__name__, self.start, self.end, self.step)
def __eq__(self, other):
return isinstance(other, Slice) and other.start == self.start and self.end == other.end and other.step == self.step
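# Example of the coercion described above:
#   parse('a[*]').find({'a': {'b': 'hello'}}) returns one match whose value
#   is {'b': 'hello'}: the non-list is wrapped in a one-element list before
#   slicing.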

View File

@@ -0,0 +1,171 @@
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
import sys
import logging
import ply.lex
logger = logging.getLogger(__name__)
class JsonPathLexerError(Exception):
pass
class JsonPathLexer(object):
'''
A Lexical analyzer for JsonPath.
'''
def __init__(self, debug=False):
self.debug = debug
if self.__doc__ is None:
raise JsonPathLexerError('Docstrings have been removed! By design of PLY, jsonpath-rw requires docstrings. You must not use PYTHONOPTIMIZE=2 or python -OO.')
def tokenize(self, string):
'''
Maps a string to an iterator over tokens. In other words: [char] -> [token]
'''
new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
new_lexer.latest_newline = 0
new_lexer.string_value = None
new_lexer.input(string)
while True:
t = new_lexer.token()
if t is None: break
t.col = t.lexpos - new_lexer.latest_newline
yield t
if new_lexer.string_value is not None:
raise JsonPathLexerError('Unexpected EOF in string literal or identifier')
# ============== PLY Lexer specification ==================
#
# This probably should be private but:
# - the parser requires access to `tokens` (perhaps they should be defined in a third, shared dependency)
# - things like `literals` might be a legitimate part of the public interface.
#
# Anyhow, it is pythonic to give some rope to hang oneself with :-)
literals = ['*', '.', '[', ']', '(', ')', '$', ',', ':', '|', '&']
reserved_words = { 'where': 'WHERE' }
tokens = ['DOUBLEDOT', 'NUMBER', 'ID', 'NAMED_OPERATOR'] + list(reserved_words.values())
states = [ ('singlequote', 'exclusive'),
('doublequote', 'exclusive'),
('backquote', 'exclusive') ]
# Normal lexing, rather easy
t_DOUBLEDOT = r'\.\.'
t_ignore = ' \t'
def t_ID(self, t):
r'[a-zA-Z_@][a-zA-Z0-9_@\-]*'
t.type = self.reserved_words.get(t.value, 'ID')
return t
def t_NUMBER(self, t):
r'-?\d+'
t.value = int(t.value)
return t
# Single-quoted strings
t_singlequote_ignore = ''
def t_singlequote(self, t):
r"'"
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('singlequote')
def t_singlequote_content(self, t):
r"[^'\\]+"
t.lexer.string_value += t.value
def t_singlequote_escape(self, t):
r'\\.'
t.lexer.string_value += t.value[1]
def t_singlequote_end(self, t):
r"'"
t.value = t.lexer.string_value
t.type = 'ID'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def t_singlequote_error(self, t):
raise JsonPathLexerError('Error on line %s, col %s while lexing singlequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
# Double-quoted strings
t_doublequote_ignore = ''
def t_doublequote(self, t):
r'"'
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('doublequote')
def t_doublequote_content(self, t):
r'[^"\\]+'
t.lexer.string_value += t.value
def t_doublequote_escape(self, t):
r'\\.'
t.lexer.string_value += t.value[1]
def t_doublequote_end(self, t):
r'"'
t.value = t.lexer.string_value
t.type = 'ID'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def t_doublequote_error(self, t):
raise JsonPathLexerError('Error on line %s, col %s while lexing doublequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
# Back-quoted "magic" operators
t_backquote_ignore = ''
def t_backquote(self, t):
r'`'
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('backquote')
def t_backquote_escape(self, t):
r'\\.'
t.lexer.string_value += t.value[1]
def t_backquote_content(self, t):
r"[^`\\]+"
t.lexer.string_value += t.value
def t_backquote_end(self, t):
r'`'
t.value = t.lexer.string_value
t.type = 'NAMED_OPERATOR'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def t_backquote_error(self, t):
raise JsonPathLexerError('Error on line %s, col %s while lexing backquoted operator: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
# Counting lines, handling errors
def t_newline(self, t):
r'\n'
t.lexer.lineno += 1
t.lexer.latest_newline = t.lexpos
def t_error(self, t):
raise JsonPathLexerError('Error on line %s, col %s: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
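# Example (hedged): tokenizing "$.foo[0]" yields the tokens '$', '.',
# ID('foo'), '[', NUMBER(0), ']'; PLY gives literal characters a token type
# equal to the character itself.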
if __name__ == '__main__':
logging.basicConfig()
lexer = JsonPathLexer(debug=True)
for token in lexer.tokenize(sys.stdin.read()):
print('%-20s%s' % (token.value, token.type))

View File

@@ -0,0 +1,187 @@
from __future__ import print_function, absolute_import, division, generators, nested_scopes
import sys
import os.path
import logging
import ply.yacc
from jsonpath_rw.jsonpath import *
from jsonpath_rw.lexer import JsonPathLexer
logger = logging.getLogger(__name__)
def parse(string):
return JsonPathParser().parse(string)
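# Example (hedged): parse('foo.bar') builds Child(Fields('foo'), Fields('bar')),
# and parse('$..baz') builds Descendants(Root(), Fields('baz')).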
class JsonPathParser(object):
'''
An LALR-parser for JsonPath
'''
tokens = JsonPathLexer.tokens
def __init__(self, debug=False, lexer_class=None):
if self.__doc__ is None:
raise Exception('Docstrings have been removed! By design of PLY, jsonpath-rw requires docstrings. You must not use PYTHONOPTIMIZE=2 or python -OO.')
self.debug = debug
self.lexer_class = lexer_class or JsonPathLexer # Crufty but works around statefulness in PLY
def parse(self, string, lexer = None):
lexer = lexer or self.lexer_class()
return self.parse_token_stream(lexer.tokenize(string))
def parse_token_stream(self, token_iterator, start_symbol='jsonpath'):
# Since PLY has some crufty aspects and dumps files, we try to keep them local
# However, we need to derive the name of the output Python file :-/
output_directory = os.path.dirname(__file__)
try:
module_name = os.path.splitext(os.path.split(__file__)[1])[0]
except Exception:
module_name = __name__
parsing_table_module = '_'.join([module_name, start_symbol, 'parsetab'])
# And we regenerate the parse table every time; it doesn't actually take that long!
new_parser = ply.yacc.yacc(module=self,
debug=self.debug,
tabmodule = parsing_table_module,
outputdir = output_directory,
write_tables=0,
start = start_symbol,
errorlog = logger)
return new_parser.parse(lexer = IteratorToTokenStream(token_iterator))
# ===================== PLY Parser specification =====================
precedence = [
('left', ','),
('left', 'DOUBLEDOT'),
('left', '.'),
('left', '|'),
('left', '&'),
('left', 'WHERE'),
]
def p_error(self, t):
raise Exception('Parse error at %s:%s near token %s (%s)' % (t.lineno, t.col, t.value, t.type))
def p_jsonpath_binop(self, p):
"""jsonpath : jsonpath '.' jsonpath
| jsonpath DOUBLEDOT jsonpath
| jsonpath WHERE jsonpath
| jsonpath '|' jsonpath
| jsonpath '&' jsonpath"""
op = p[2]
if op == '.':
p[0] = Child(p[1], p[3])
elif op == '..':
p[0] = Descendants(p[1], p[3])
elif op == 'where':
p[0] = Where(p[1], p[3])
elif op == '|':
p[0] = Union(p[1], p[3])
elif op == '&':
p[0] = Intersect(p[1], p[3])
def p_jsonpath_fields(self, p):
"jsonpath : fields_or_any"
p[0] = Fields(*p[1])
def p_jsonpath_named_operator(self, p):
"jsonpath : NAMED_OPERATOR"
if p[1] == 'this':
p[0] = This()
elif p[1] == 'parent':
p[0] = Parent()
else:
raise Exception('Unknown named operator `%s` at %s:%s' % (p[1], p.lineno(1), p.lexpos(1)))
def p_jsonpath_root(self, p):
"jsonpath : '$'"
p[0] = Root()
def p_jsonpath_idx(self, p):
"jsonpath : '[' idx ']'"
p[0] = p[2]
def p_jsonpath_slice(self, p):
"jsonpath : '[' slice ']'"
p[0] = p[2]
def p_jsonpath_fieldbrackets(self, p):
"jsonpath : '[' fields ']'"
p[0] = Fields(*p[2])
def p_jsonpath_child_fieldbrackets(self, p):
"jsonpath : jsonpath '[' fields ']'"
p[0] = Child(p[1], Fields(*p[3]))
def p_jsonpath_child_idxbrackets(self, p):
"jsonpath : jsonpath '[' idx ']'"
p[0] = Child(p[1], p[3])
def p_jsonpath_child_slicebrackets(self, p):
"jsonpath : jsonpath '[' slice ']'"
p[0] = Child(p[1], p[3])
def p_jsonpath_parens(self, p):
"jsonpath : '(' jsonpath ')'"
p[0] = p[2]
# Because fields in brackets cannot be '*' - that is reserved for array indices
def p_fields_or_any(self, p):
"""fields_or_any : fields
| '*' """
if p[1] == '*':
p[0] = ['*']
else:
p[0] = p[1]
def p_fields_id(self, p):
"fields : ID"
p[0] = [p[1]]
def p_fields_comma(self, p):
"fields : fields ',' fields"
p[0] = p[1] + p[3]
def p_idx(self, p):
"idx : NUMBER"
p[0] = Index(p[1])
def p_slice_any(self, p):
"slice : '*'"
p[0] = Slice()
def p_slice(self, p): # Currently does not support `step`
"slice : maybe_int ':' maybe_int"
p[0] = Slice(start=p[1], end=p[3])
def p_maybe_int(self, p):
"""maybe_int : NUMBER
| empty"""
p[0] = p[1]
def p_empty(self, p):
'empty :'
p[0] = None
class IteratorToTokenStream(object):
def __init__(self, iterator):
self.iterator = iterator
def token(self):
try:
return next(self.iterator)
except StopIteration:
return None
if __name__ == '__main__':
logging.basicConfig()
parser = JsonPathParser(debug=True)
print(parser.parse(sys.stdin.read()))

Some files were not shown because too many files have changed in this diff.