Compare commits


270 Commits

Author SHA1 Message Date
Vinnie Falco
05a04aa801 Set version to 0.26.4 2014-11-03 16:53:37 -08:00
Vinnie Falco
6591c21ace Set version to 0.26.4-rc4 2014-10-27 11:49:39 -07:00
Vinnie Falco
e8d03c7b9b Update rocksdb unity file 2014-10-27 11:48:21 -07:00
Vinnie Falco
6fbce4c2f7 Update src/rocksdb2 to rocksdb-3.5.1:
Merge commit 'c168d54495d7d7b84639514f6443ad99b89ce996' into develop
2014-10-27 11:37:01 -07:00
Vinnie Falco
c168d54495 Squashed 'src/rocksdb2/' changes from 25888ae..1fdd726
1fdd726 Hotfix RocksDB 3.5
d67500a Add `make install` to Makefile in 3.5.fb.
4cb631a update HISTORY.md
cfd0946 comments about the BlockBasedTableOptions migration in Options
REVERT: 25888ae Merge pull request #329 from fyrz/master
REVERT: 89833e5 Fixed signed-unsigned comparison warning in db_test.cc
REVERT: fcac705 Fixed compile warning on Mac caused by unused variables.
REVERT: b3343fd resolution for java build problem introduced by 5ec53f3edf62bec1b690ce12fb21a6c52203f3c8
REVERT: 187b299 ForwardIterator: update prev_key_ only if prefix hasn't changed
REVERT: 5ec53f3 make compaction related options changeable
REVERT: d122e7b Update INSTALL.md
REVERT: 986dad0 Merge pull request #324 from dalgaaf/wip-da-SCA-20140930
REVERT: 8ee75dc db/memtable.cc: remove unused variable merge_result
REVERT: 0fd8bbc db/db_impl.cc: reduce scope of prefix_initialized
REVERT: 676ff7b compaction_picker.cc: remove check for >=0 for unsigned
REVERT: e55aea5 document_db.cc: fix assert
REVERT: d517c83 in_table_factory.cc: use correct format specifier
REVERT: b140375 ttl/ttl_test.cc: prefer prefix ++operator for non-primitive types
REVERT: 43c789c spatialdb/spatial_db.cc: use !empty() instead of 'size() > 0'
REVERT: 0de452e document_db.cc: pass const parameter by reference
REVERT: 4cc8643 util/ldb_cmd.cc: prefer prefix ++operator for non-primitive types
REVERT: af8c2b2 util/signal_test.cc: suppress intentional null pointer deref
REVERT: 33580fa db/db_impl.cc: fix object handling, remove double lines
REVERT: 873f135 db_ttl_impl.h: pass func parameter by reference
REVERT: 8558457 ldb_cmd_execute_result.h: perform init in initialization list
REVERT: 063471b table/table_test.cc: pass func parameter by reference
REVERT: 93548ce table/cuckoo_table_reader.cc: pass func parameter by ref
REVERT: b8b7117 db/version_set.cc: use !empty() instead of 'size() > 0'
REVERT: 8ce050b table/bloom_block.*: pass func parameter by reference
REVERT: 53910dd db_test.cc: pass parameter by reference
REVERT: 68ca534 corruption_test.cc: pass parameter by reference
REVERT: 7506198 cuckoo_table_db_test.cc: add flush after delete
REVERT: 1f96330 Print MB per second compaction throughput separately for reads and writes
REVERT: ffe3d49 Add an instruction about SSE in INSTALL.md
REVERT: ee1f3cc Package generation for Ubuntu and CentOS
REVERT: f0f7955 Fixing comile errors on OS X
REVERT: 99fb613 remove 2 space linter
REVERT: b2d64a4 Fix linters, second try
REVERT: 747523d Print per column family metrics in db_bench
REVERT: 56ebd40 Fix arc lint (should fix #238)
REVERT: 637f891 Merge pull request #321 from eonnen/master
REVERT: 827e31c Make test use a compatible type in the size checks.
REVERT: fd5d80d CompactedDB: log using the correct info_log
REVERT: 2faf49d use GetContext to replace callback function pointer
REVERT: 983d2de Add AUTHORS file. Fix #203
REVERT: abd70c5 Merge pull request #316 from fyrz/ReverseBytewiseComparator
REVERT: 2dc6f62 handle kDelete type in cuckoo builder
REVERT: 8b8011a Changed name of ReverseBytewiseComparator based on review comment
REVERT: 389edb6 universal compaction picker: use double for potential overflow
REVERT: 5340484 Built-in comparator(s) in RocksJava
REVERT: d439451 delay initialization of cuckoo table iterator
REVERT: 94997ea reduce memory usage of cuckoo table builder
REVERT: c627595 improve memory efficiency of cuckoo reader
REVERT: 581442d option to choose module when calculating CuckooTable hash
REVERT: fbd2daf CompactedDBImpl::MultiGet() for better CuckooTable performance
REVERT: 3c68006 CompactedDBImpl
REVERT: f7375f3 Fix double deletes
REVERT: 21ddcf6 Remove allow_thread_local
REVERT: fb4a492 Merge pull request #311 from ankgup87/master
REVERT: 611e286 Merge branch 'master' of https://github.com/facebook/rocksdb
REVERT: 0103b44 Merge branch 'master' of ssh://github.com/ankgup87/rocksdb
REVERT: 1dfb7bb Add block based table config options
REVERT: cdaf44f Enlarge log size cap when printing file summary
REVERT: 7cc1ed7 Merge pull request #309 from naveenatceg/staticbuild
REVERT: ba6d660 Resolving merge conflict
REVERT: 51eeaf6 Addressing review comments
REVERT: fd7d3fe Addressing review comments (adding a env variable to override temp directory)
REVERT: cf7ace8 Addressing review comments
REVERT: 0a29ce5 re-enable BlockBasedTable::SetupForCompaction()
REVERT: 55af370 Remove TODO for checking index checksums
REVERT: 3d74f09 Fix compile
REVERT: 53b0039 Fix release compile
REVERT: d0de413 WriteBatchWithIndex to allow different Comparators for different column families
REVERT: 57a32f1 change target_file_size_base to uint64_t
REVERT: 5e6aee4 dont create backup_input if compaction filter v2 is not used
REVERT: 49b5f94 Merge pull request #306 from Liuchang0812/fix_cast
REVERT: 787cb4d remove cast, replace %llu with % PRIu64
REVERT: a7574d4 Update logging.cc
REVERT: 7e0dcb9 Update logging.cc
REVERT: 57fa3cc Merge pull request #304 from Liuchang0812/fix-check
REVERT: cd44522 Merge pull request #305 from Liuchang0812/fix-logging
REVERT: 6a031b6 remove unused variable
REVERT: 4436f17 fixed #303: replace %ld with % PRId64
REVERT: 7a1bd05 Merge pull request #302 from ankgup87/master
REVERT: 423e52c Merge branch 'master' of https://github.com/facebook/rocksdb
REVERT: bfeef94 Add rate limiter
REVERT: 32f2532 Print compression_size_percent as a signed int
REVERT: 976caca Skip AllocateTest if fallocate() is not supported in the file system
REVERT: 3b897cd Enable no-fbcode RocksDB build
REVERT: f445947 RocksDB: Format uint64 using PRIu64 in db_impl.cc
REVERT: e17bc65 Merge pull request #299 from ankgup87/master
REVERT: b93797a Fix build
REVERT: adae3ca [Java] Fix JNI link error caused by the removal of options.db_stats_log_interval
REVERT: 90b8c07 Fix unit tests errors
REVERT: 51af7c3 CuckooTable: add one option to allow identity function for the first hash function
REVERT: 0350435 Fixed a signed-unsigned comparison in spatial_db.cc -- issue #293
REVERT: 2fb1fea Fix syncronization issues
REVERT: ff76895 Remove some unnecessary constructors
REVERT: feadb9d fix cuckoo table builder test
REVERT: 3c232e1 Fix mac compile
REVERT: 54cada9 Run make format on PR #249
REVERT: 27b22f1 Merge pull request #249 from tdfischer/decompression-refactoring
REVERT: fb6456b Replace naked calls to operator new and delete (Fixes #222)
REVERT: 5600c8f cuckoo table: return estimated size - 1
REVERT: a062e1f SetOptions() for memtable related options
REVERT: e4eca6a Options conversion function for convenience
REVERT: a7c2094 Merge pull request #292 from saghmrossi/master
REVERT: 4d05234 Merge branch 'master' of github.com:saghmrossi/rocksdb
REVERT: 60a4aa1 Test use_mmap_reads
REVERT: 94e43a1 [Java] Fixed 32-bit overflowing issue when converting jlong to size_t
REVERT: f9eaaa6 added include for inttypes.h to fix nonworking printf statements
REVERT: f090575 Replaced "built on on earlier work" by "built on earlier work" in README.md
REVERT: faad439 Fix #284
REVERT: 49aacd8 Fix make install
REVERT: acb9348 [Java] Include WriteBatch into RocksDBSample.java, fix how DbBenchmark.java handles WriteBatch.
REVERT: 4a27a2f Don't sync manifest when disableDataSync = true
REVERT: 9b8480d Merge pull request #287 from yinqiwen/rate-limiter-crash-fix
REVERT: 28be16b fix rate limiter crash #286
REVERT: 04ce1b2 Fix #284
REVERT: add22e3 standardize scripts to run RocksDB benchmarks
REVERT: dee91c2 WriteThread
REVERT: 540a257 Fix WAL synced
REVERT: 24f034b Merge pull request #282 from Chilledheart/develop
REVERT: 49fe329 Fix build issue under macosx
REVERT: ebb5c65 Add make install
REVERT: 0352a9f add_wrapped_bloom_test
REVERT: 9c0e66c Don't run background jobs (flush, compactions) when bg_error_ is set
REVERT: a9639bd Fix valgrind test
REVERT: d1f24dc Relax FlushSchedule test
REVERT: 3d9e6f7 Push model for flushing memtables
REVERT: 059e584 [unit test] CompactRange should fail if we don't have space
REVERT: dd641b2 fix RocksDB java build
REVERT: 53404d9 add_qps_info_in cache bench
REVERT: a52cecb Fix Mac compile
REVERT: 092f97e Fix comments and typos
REVERT: 6cc1286 Added a few statistics for BackupableDB
REVERT: 0a42295 Fix SimpleWriteTimeoutTest
REVERT: 06d9862 Always pass MergeContext as pointer, not reference
REVERT: d343c3f Improve db recovery
REVERT: 6bb7e3e Merger test
REVERT: 88841bd Explicitly cast char to signed char in Hash()
REVERT: 5231146 MemTableOptions
REVERT: 1d284db Addressing review comments
REVERT: 55114e7 Some updates for SpatialDB
REVERT: 171d4ff remove TailingIterator reference in db_impl.h
REVERT: 9b0f7ff rename version_set options_ to db_options_ to avoid confusion
REVERT: 2d57828 Check stop level trigger-0 before slowdown level-0 trigger
REVERT: 659d2d5 move compaction_filter to immutable_options
REVERT: 048560a reduce references to cfd->options() in DBImpl
REVERT: 011241b DB::Flush() Do not wait for background threads when there is nothing in mem table
REVERT: a2bb7c3 Push- instead of pull-model for managing Write stalls
REVERT: 0af157f Implement full filter for block based table.
REVERT: 9360cc6 Fix valgrind issue
REVERT: 02d5bff Merge pull request #277 from wankai/master
REVERT: 88a2f44 fix comments
REVERT: 7c16e39 Merge pull request #276 from wankai/master
REVERT: 8237738 replace hard-coded number with named variable
REVERT: db8ca52 Merge pull request #273 from nbougalis/static-analysis
REVERT: b7b031f Merge pull request #274 from wankai/master
REVERT: 4c2b1f0 Merge remote-tracking branch 'upstream/master'
REVERT: a5d2863 typo improvement
REVERT: 9f8aa09 Don't leak data returned by opendir
REVERT: d1cfb71 Remove unused member(s)
REVERT: bfee319 sizeof(int*) where sizeof(int) was intended
REVERT: d40c1f7 Add missing break statement
REVERT: 2e97c38 Avoid off-by-one error when using readlink
REVERT: 40ddc3d add cache bench
REVERT: 9f1c80b Drop column family from write thread
REVERT: 8de151b Add db_bench with lots of column families to regression tests
REVERT: c9e419c rename options_ to db_options_ in DBImpl to avoid confusion
REVERT: 5cd0576 Fix compaction bug in Cuckoo Table Builder. Use kvs_.size() instead of num_entries in FileSize() method.
REVERT: 0fbb3fa fixed memory leak in unit test DBIteratorBoundTest
REVERT: adcd253 fix asan check
REVERT: 4092b7a Merge pull request #272 from project-zerus/patch-1
REVERT: bb6ae0f fix more compile warnings
REVERT: 6d31441 Merge pull request #271 from nbougalis/cleanups
REVERT: 0cd0ec4 Plug memory leak during index creation
REVERT: 4329d74 Fix swapped variable names to accurately reflect usage
REVERT: 45a5e3e Remove path with arena==nullptr from NewInternalIterator
REVERT: 5665e5e introduce ImmutableOptions
REVERT: e0b99d4 created a new ReadOptions parameter 'iterate_upper_bound'
REVERT: 51ea889 Fix travis builds
REVERT: a481626 Relax backupable rate limiting test
REVERT: f7f973d Merge pull request #269 from huahang/patch-2
REVERT: ef5b384 fix a few compile warnings
REVERT: 2fd3806 Merge pull request #263 from wankai/master
REVERT: 1785114 delete unused Comparator
REVERT: 1b1d961 update HISTORY.md
REVERT: 703c3ea comments about the BlockBasedTableOptions migration in Options
REVERT: 4b5ad88 Merge pull request #260 from wankai/master
REVERT: 19cc588 change to filter_block std::unique_ptr support RAII
REVERT: 9b976e3 Merge pull request #259 from wankai/master
REVERT: 5d25a46 Merge remote-tracking branch 'upstream/master'
REVERT: dff2b1a typo improvement
REVERT: 343e98a Reverting import change
REVERT: ddb8039 RocksDB static build Make file changes to download and build the dependencies .Load the shared library when RocksDB is initialized

git-subtree-dir: src/rocksdb2
git-subtree-split: 1fdd726a8254c13d0c66d8db8130ad17c13d7bcc
2014-10-27 11:36:32 -07:00
Vinnie Falco
2cce22052b Update SQLite to 3.8.7:
sha1: 3e23079f062fc06705eead4db108ee429878b532
2014-10-27 11:04:46 -07:00
Tom Ritchford
4e19d5f625 Adjust paths and costs in Pathfinder. 2014-10-27 11:03:19 -07:00
Tom Ritchford
5b667da526 Squelch some warnings in rippled and third-party code. 2014-10-27 10:00:03 -07:00
Nik Bougalis
f9fc9a3518 Reduce RippleD dependencies on Beast:
* Use static_assert where appropriate
* Use std::min and std::max where appropriate
* Simplify RippleD error reporting
* Remove use of beast::RandomAccessFile
2014-10-27 09:55:58 -07:00
Nik Bougalis
e005cfd70e Reduce Beast public interface and eliminate unused code:
Beast includes a lot of code for encapsulating cross-platform differences,
much of which is not used or needed by rippled. Additionally, a lot of that code
implements functionality that is available from the standard library.

This moves away from custom implementations of features that the standard
library provides and reduces the number of platform-specific interfaces
and features that Beast makes available.

Highlights include:
* Use std:: instead of beast implementations when possible
* Reduce the use of beast::String in public interfaces
* Remove Windows-specific COM and Registry code
* Reduce the public interface of beast::File
* Reduce the public interface of beast::SystemStats
* Remove unused sysctl/getsysinfo functions
* Remove beast::Logger
2014-10-27 09:55:43 -07:00
Vinnie Falco
feb997481c Refactor the structure of ServerHandler:
This is a cleanup to the structure of the sources.
* Rename to ServerHandler
* Move private implementation declaration to separate header
* De-inline function definitions in the class declaration.
2014-10-27 09:50:03 -07:00
Vinnie Falco
2c8e90c9d8 Remove obsolete RPCServerHandler:
This removes the legacy RPCServerHandler, which has been replaced by the
asynchronous RPC-HTTP/S server and corresponding RPCHTTPHandler.
2014-10-27 09:50:03 -07:00
Vinnie Falco
ec96d5afa0 Remove unused and obsolete classes and tidy up:
Many classes required to support type-erasure of handlers and boost::asio
types are now obsolete, so these classes and files are removed:
HTTPClientType, FixedInputBuffer, PeerRole, socket_wrapper,
client_session, basic_url, abstract_socket, buffer_sequence, memory_buffer,
enable_wait_for_async, shared_handler, wrap_handler, streambuf,
ContentBodyBuffer, SSLContext, completion-handler based handshake detectors.
These structural changes are made:
* Some missing includes added to headers
* asio module directory flattened
2014-10-26 08:40:52 -07:00
Vinnie Falco
8be8853c33 Remove obsolete classes, disable unused code, and tidy up:
* Removed MultiSocket. Code that previously used the MultiSocket now uses
  a combination of boost::asio coroutines and CRTP.
* Sitefiles headers rolled up and directory flattened.
* Disabled Sitefiles use of deprecated HTTPClient.
* Validators headers tidied up.
* Disabled Validators use of deprecated HTTPClient.
2014-10-26 08:38:37 -07:00
Vinnie Falco
c228f5a244 Set version to 0.26.4-rc3 2014-10-25 08:07:40 -07:00
Vinnie Falco
d4c8b4e3ac Merge branch 'release' into develop 2014-10-25 08:07:30 -07:00
Vinnie Falco
6564f6c164 Fix incorrect socket closure in Overlay peers:
On Application exit, Overlay was calling PeerImp::close for each peer.
The implementation of PeerImp::close only canceled all pending I/O and did not
call functions necessary for proper transition of Peer state during socket
closure. The correct transition is ensured by calling PeerImp::detach. This
changes PeerImp::close to call PeerImp::detach instead, ensuring that Overlay
invariants are maintained. Specifically, that reference counts for pending I/O
on peers will be correctly unwound by canceling operations and that the Peer
object will be destroyed, thus allowing the Overlay to stop correctly.
2014-10-25 08:01:57 -07:00
Vinnie Falco
1e37a5509c Add missing includes 2014-10-24 08:13:55 -07:00
Vinnie Falco
1e9503deaa Set version to 0.26.2-rc2 2014-10-23 13:49:22 -07:00
Vinnie Falco
ab1f36c565 Revert "Add [overlay] configuration section (experimental):"
This reverts commit 856fd9d69f.
2014-10-23 13:48:52 -07:00
Vinnie Falco
5a212cd626 Set version to 0.26.4-rc1 2014-10-23 13:01:12 -07:00
Vinnie Falco
856fd9d69f Add [overlay] configuration section (experimental):
This configuration section uses the new BasicConfig interface that supports
key-value pairs in the section. Some exposition is added to the example cfg
file. The new settings for overlay are related to the Hub and Spoke feature
which is currently in development. Production servers should not set
these configuration options; they are clearly marked as experimental in the
example cfg file.

Conflicts:
	src/ripple/overlay/impl/OverlayImpl.cpp
	src/ripple/overlay/impl/OverlayImpl.h
	src/ripple/overlay/impl/PeerImp.cpp
	src/ripple/overlay/impl/PeerImp.h
2014-10-23 12:56:16 -07:00
Vinnie Falco
4606d99951 Don't use MultiSocket in Overlay:
The MultiSocket is obsolete technology which is superseded by a more
straightforward, template based implementation that is compatible with
boost::asio::coroutines. This removes support for the unused PROXY handshake
feature. After this change a large number of classes and source files may be
removed.
2014-10-23 12:56:16 -07:00
Tom Ritchford
dbd75169e5 New JsonWriter for improved client performance (RIPD-439):
When JSON-RPC and Websocket responses are calculated, the result is stored
in intermediate Json::Value objects and later composed in a single linear
memory buffer before being sent to the socket.  These classes support a
new model for building responses that supports incremental construction
of JSON replies in constant time and removes the requirement that all
data returned be located in contiguous memory.
* New JsonWriter incrementally writes JSON with O(1) granularity and memory.
* Array, Object are RAII wrappers for the O(1) JsonWriter.
2014-10-23 07:04:47 -07:00
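
A minimal sketch of the incremental-construction idea described above, using hypothetical Writer/ObjectScope classes rather than the actual JsonWriter API: output is appended to a stream as each value arrives, and an RAII scope closes the enclosing object, so no intermediate Json::Value tree or contiguous buffer is needed.

    #include <iostream>
    #include <ostream>
    #include <string>

    // Hypothetical incremental writer: appends JSON text as values arrive.
    class Writer
    {
        std::ostream& os_;
        bool first_ = true;
    public:
        explicit Writer (std::ostream& os) : os_ (os) {}
        void comma() { if (! first_) os_ << ','; first_ = false; }
        std::ostream& stream() { return os_; }
    };

    // RAII scope: opens an object on construction, closes it on destruction.
    class ObjectScope
    {
        Writer& w_;
    public:
        explicit ObjectScope (Writer& w) : w_ (w) { w_.stream() << '{'; }
        ~ObjectScope() { w_.stream() << '}'; }
        void set (std::string const& key, std::string const& value)
        {
            w_.comma();
            w_.stream() << '"' << key << "\":\"" << value << '"';
        }
    };

    int main()
    {
        Writer w (std::cout);
        {
            ObjectScope obj (w);            // writes '{'
            obj.set ("status", "success");  // O(1) work per field, no tree built
            obj.set ("ledger", "8696234");
        }                                   // writes '}'
        std::cout << '\n';
    }
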
Vinnie Falco
f5b39ee911 Remove HTTP::ScopedStream:
This class was used to allow stream style operator<< to write to the
HTTP::Session. This is being superseded by a more robust object-based model
that supports coroutines.
2014-10-22 19:36:28 -07:00
Vinnie Falco
db5d52b4b2 Keep a list of section config values that are not key/value pairs:
This change to BasicConfig stores all appended lines which are not key/value
pairs in a separate values vector which can be retrieved later. This is to
support sections containing both key/value pairs and a list of values.
2014-10-22 19:36:28 -07:00
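
A rough sketch of that storage scheme with invented names (not the real BasicConfig interface): lines containing '=' become key/value pairs, anything else is appended to a separate list of plain values, so a section can later be read either way.

    #include <map>
    #include <string>
    #include <vector>

    // Hypothetical section holding both key/value pairs and plain value lines.
    struct Section
    {
        std::map<std::string, std::string> pairs;
        std::vector<std::string> values;        // lines that are not key/value pairs

        void append (std::string const& line)
        {
            auto const pos = line.find ('=');
            if (pos != std::string::npos)
                pairs[line.substr (0, pos)] = line.substr (pos + 1);
            else
                values.push_back (line);        // keep the raw line for later retrieval
        }
    };

    int main()
    {
        Section ips;                            // e.g. an [ips]-style section
        ips.append ("r.ripple.com 51235");      // plain value line
        ips.append ("listen_port=51235");       // key/value pair
    }
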
Vinnie Falco
dfeb9967b8 Return error_code from beast::http::basic_parser:
This changes the HTTP parser interface to return an error_code instead
of a bool. This eliminates the need for the error() member function and
simplifies calling code.
2014-10-22 19:36:28 -07:00
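
The simplification for calling code looks roughly like this (hypothetical parser type; only the return-value convention is taken from the commit): instead of testing a bool and then querying a separate error() accessor, the caller receives the error_code directly.

    #include <boost/system/error_code.hpp>
    #include <cstddef>

    // Hypothetical parser whose write() reports failure via error_code.
    struct parser
    {
        boost::system::error_code write (void const*, std::size_t)
        {
            return {};      // success; a real parser sets an error on bad input
        }
    };

    int main()
    {
        parser p;
        char const data[] = "GET / HTTP/1.1\r\n\r\n";
        // Before: if (! p.write (data, n)) ec = p.error();
        // After: the error travels in the return value.
        if (auto const ec = p.write (data, sizeof (data) - 1))
            return 1;       // handle ec
    }
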
Vinnie Falco
673e860c18 Add beast::asio::ssl_bundle workaround:
This works around the limitation that 1.56 boost::asio::ssl::stream objects
do not support r-value move or construction. It is required when the stream
does not own the socket.
2014-10-22 19:36:28 -07:00
Vinnie Falco
9deae34b20 Workaround for MSVC stdlib and coroutine interaction:
If beast::Time::currentTimeMillis is first called from a coroutine launched
using boost::asio::spawn, Win32 throws an exception. This workaround calls
getCurrentTime once in main to prevent the exception.
Reference:
    https://svn.boost.org/trac/boost/ticket/10657
2014-10-22 19:36:27 -07:00
Vinnie Falco
ec92344fb4 Use autotls instead of multitls in websocket:
The MultiSocket class implements a socket that handshakes in multiple
protocols including SSL and PROXY. Unfortunately the way it type-erases the
handlers and buffers is incompatible with boost::asio coroutines. To pave the
way for coroutines, this is part of a larger set of changes that roll back the
usage of MultiSocket to older code and to custom implementations that use
templates. The custom implementations are simpler since they use
coroutines. Removing MultiSocket will make many other classes and source files
unused, a big win for trimming down the codebase size.
2014-10-22 19:34:48 -07:00
Donovan Hide
44c68d6174 Change NodeStore::Backend tests to reflect observed patterns:
Empirical evidence shows a database access pattern with few hits
and many misses (objects that don't exist). This changes the timing
tests so they more accurately reflect rippled's actual usage:
* Add read missing keys test
* Increase numObjectsToTest to 1,000,000
* Alter PredictableObjectFactory to seed RNG once only
* Make NodeStoreTiming a manual test
2014-10-22 19:29:29 -07:00
Howard Hinnant
5b7f172d03 Fix OS X version parsing/error related to OS X 10.10 update. 2014-10-22 19:29:28 -07:00
JoelKatz
65125eac87 Add "deferred" flag to transaction relay message
If we receive a deferred transaction from a server in our
cluster, treat it as if it wasn't received from a server
in our cluster.

This currently has no effect but is needed for server to
interoperate with future code that will relay deferred
transactions.
2014-10-22 19:29:28 -07:00
Scott Schurr
761902864a Refactor STParsedJSON to parse an object or array [RIPD-480]
The implementation of multi-sign has a SigningAccounts array as a
member of the outermost object.  This array could not be parsed
by the previous implementation of STParsedJSON, which only knew
how to parse objects.  This refactor supports the required parsing.

The refactor divides the parsing into three separate functions:

 o parseNoRecurse() which parses most rippled data types.
 o parseObject() which parses object types that may contain
   arbitrary other types.
 o parseArray() which parses array types that may contain
   arbitrary other types.

The change is required by the multi-sign implementation, but is
independent.  So the parsing change is going in as a separate
commit.

The parsing is still far from perfect, but this was as much as
needed to accomplish the goal while mitigating the risk of breaking
the parser.
2014-10-22 19:29:28 -07:00
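
A schematic of that split with simplified, hypothetical signatures: a leaf parser for plain fields plus two mutually recursive helpers that dispatch on whether a member is an object or an array.

    #include <vector>

    // Minimal stand-in for a JSON value (not rippled's Json::Value).
    struct Value
    {
        enum Kind { Leaf, Object, Array } kind = Leaf;
        std::vector<Value> children;
    };

    // Leaf fields: numbers, strings, amounts, and other non-recursive types.
    bool parseNoRecurse (Value const&) { return true; }

    bool parseArray (Value const& v);   // forward declaration

    // Objects may contain leaves, nested objects, or arrays.
    bool parseObject (Value const& v)
    {
        for (auto const& child : v.children)
        {
            bool ok = true;
            if (child.kind == Value::Object)        ok = parseObject (child);
            else if (child.kind == Value::Array)    ok = parseArray (child);
            else                                    ok = parseNoRecurse (child);
            if (! ok)
                return false;
        }
        return true;
    }

    // Arrays may likewise contain arbitrary element types.
    bool parseArray (Value const& v)
    {
        for (auto const& element : v.children)
        {
            bool ok = true;
            if (element.kind == Value::Object)      ok = parseObject (element);
            else if (element.kind == Value::Array)  ok = parseArray (element);
            else                                    ok = parseNoRecurse (element);
            if (! ok)
                return false;
        }
        return true;
    }

    int main()
    {
        Value obj;
        obj.kind = Value::Object;
        Value signingAccounts;                  // e.g. the outermost array member
        signingAccounts.kind = Value::Array;
        signingAccounts.children.push_back (obj);
        return parseArray (signingAccounts) ? 0 : 1;
    }
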
Vinnie Falco
af24d541d1 Workaround for MSVC move special members. 2014-10-18 08:16:12 -07:00
Donovan Hide
3ad68a617e Fix dependency on boost::thread on OS/X. 2014-10-16 21:44:36 -04:00
Nik Bougalis
9e1a6589d4 Return descriptive error message from memo validation (RIPD-591). 2014-10-16 21:44:36 -04:00
Josh Juran
da8ceed07e RippleSSLContext.cpp cleanup.
* These cleanups precede work on RIPD-108.
2014-10-16 21:44:36 -04:00
Nik Bougalis
35935adc98 Fix URL compositing in Beast (RIPD-636). 2014-10-16 21:44:36 -04:00
Howard Hinnant
5b4a501f68 Detab beast 2014-10-15 19:39:30 -04:00
Tom Ritchford
5425a90f16 Fix tabs and trailing whitespace. 2014-10-15 19:39:30 -04:00
Donovan Hide
7eaca149c1 Remove boost_thread dependency (RIPD-216).
Fixes RIPD-216
2014-10-15 19:37:25 -04:00
Mark Travis
4b5fd95657 Disable SSLv3 2014-10-15 19:37:25 -04:00
Nik Bougalis
96dedf553e Refactor SerializedTransaction:
* Use boost:tribool instead of two intertwined bool variables
* Trim down public interface, reduce member variables
2014-10-15 19:37:25 -04:00
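
On the boost::tribool point, the gain is roughly this: one variable with three well-defined states replaces a pair of flags whose combinations had to be kept consistent by hand (sketch only; the variable name is invented).

    #include <boost/logic/tribool.hpp>

    int main()
    {
        // Before: bool sigChecked; bool sigGood;  -- two intertwined flags,
        // four combinations, only three of them meaningful.
        boost::logic::tribool sigGood (boost::logic::indeterminate); // not yet checked

        sigGood = true;                        // checked and valid
        if (sigGood)                           { /* signature known good */ }
        else if (! sigGood)                    { /* signature known bad  */ }
        else if (indeterminate (sigGood))      { /* not checked yet      */ }
    }
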
sublimator
23219f2662 Disable transaction submission tests under Travis. 2014-10-15 19:37:25 -04:00
Vinnie Falco
af78ed608e Call Stoppable::stopped in PeerFinder onStop. 2014-10-15 19:37:25 -04:00
Vinnie Falco
51dc59e019 Fix outgoing bytes calculation in HTTP server. 2014-10-15 19:37:25 -04:00
Tom Ritchford
afc102e90a New class RPC::Status enforces JSON-RPC 2.0 error format.
* Relevant issues:
  * RIPD-92
  * RIPD-97
  * RIPD-98
  * RIPD-439
2014-10-15 19:37:25 -04:00
David Schwartz
fc560179e0 SHAMap performance improvements (RIPD-434)
This reworks the way SHAMaps are stored, flushed, backed, and
traversed. Rather than storing the linkages in the SHAMap itself,
that information is now stored in the nodes. This makes
snapshotting much cheaper and also allows traverse work done on
behalf of one SHAMap to be used by other SHAMaps that share inner
nodes with that SHAMap.

When a SHAMap is modified, nodes are modified all the way up to the
root. This means that the modified nodes in a SHAMap can easily be
traversed for flushing. So they don't need to be separately tracked.

Summary
* Remove mTNByID
* Remove mDirtyNodes
* Much faster traverses
* Much Faster snapshots
* New algorithm for flushing
* New visitNodes/visitLeaves
* Avoid I/O if a map is unbacked
2014-10-14 13:32:17 -04:00
sublimator
d26241de0e Remove Og from debug mode
Last time I used gdb, iterating over a directory's `Indexes`, each uint256 printed as `<optimized out>`.

Debug mode is for debugging ...
2014-10-14 12:57:41 -04:00
Howard Hinnant
00310f4f10 Silence clang warnings 2014-10-14 12:35:17 -04:00
Howard Hinnant
8caae219cf Gracefully cast from std::thread::hardware_concurrency 2014-10-14 12:35:17 -04:00
Howard Hinnant
2264ae9247 Guarantee C locale
*  Remove all calls to setlocale to ensure that the global
   locale is always C.

*  Also replace beast::SystemStats::getNumCpus() with
   std::thread::hardware_concurrency()
2014-10-14 12:35:17 -04:00
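
The hardware_concurrency replacement mentioned here and in the "Gracefully cast" commit above amounts to something like the following sketch (not the actual rippled code): the standard call returns an unsigned value that may legitimately be 0, so it is clamped before use.

    #include <algorithm>
    #include <thread>

    // std::thread::hardware_concurrency() may return 0 when the value is not
    // computable, so fall back to a sane minimum before converting.
    int numberOfCores()
    {
        unsigned const n = std::thread::hardware_concurrency();
        return static_cast<int> (std::max (1u, n));
    }

    int main()
    {
        return numberOfCores() > 0 ? 0 : 1;
    }
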
Nicholas Dudfield
29225bbe75 Attempt to fix spurious travis failures 2014-10-14 12:35:17 -04:00
Vinnie Falco
4b5625fd59 Load PeerFinder database in Stoppable::onPrepare:
OverlayImpl::onStart calls into PeerFinder before PeerFinder::Manager::onStart,
causing tests to sometimes fail and the application to intermittently not start.
The order of calls to Stoppable::onStart is implementation defined and not
predictable.

This changes PeerFinder to load the database in Stoppable::onPrepare, before
threads are launched. In general, creation and initialization of resources that
are shared between classes should happen in onPrepare rather than onStart,
to solve this problem.
2014-10-10 19:38:52 -07:00
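
The rule stated above can be pictured like this (hypothetical class, not the real Stoppable interface): shared resources are created in the prepare phase, which runs single-threaded and in a well-defined order, while onStart carries no work that another component's onStart might depend on.

    #include <cstddef>
    #include <string>
    #include <vector>

    // Hypothetical two-phase startup mirroring the onPrepare/onStart split.
    class PeerFinderLike
    {
        std::vector<std::string> bootstrapCache_;

    public:
        // Phase 1: before any threads are launched; other components may
        // rely on this having completed.
        void onPrepare()
        {
            bootstrapCache_.push_back ("198.51.100.1:51235"); // e.g. load from the database
        }

        // Phase 2: order relative to other Stoppables is unspecified, so
        // nothing here may be a prerequisite for someone else's onStart.
        void onStart()
        {
        }

        std::size_t knownAddresses() const { return bootstrapCache_.size(); }
    };

    int main()
    {
        PeerFinderLike pf;
        pf.onPrepare();     // resources exist before any onStart runs
        pf.onStart();
        return pf.knownAddresses() == 1 ? 0 : 1;
    }
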
Vinnie Falco
7c0c2419f7 Refactor PeerFinder:
Previously, the PeerFinder manager constructed with a Callback object
provided by the owner which was used to perform operations like connecting,
disconnecting, and sending messages. This made it difficult to change the
overlay code because a single call into the PeerFinder could cause both
OverlayImpl and PeerImp to be re-entered one or more times, sometimes while
holding a recursive mutex. This change eliminates the callback by changing
PeerFinder functions to return values indicating the action the caller should
take.

As a result of this change the PeerFinder no longer needs its own dedicated
thread. OverlayImpl is changed to call into PeerFinder on a timer to perform
periodic activities. Furthermore the Checker class used to perform connectivity
checks has been refactored. It no longer uses an abstract base class, in order
to not type-erase the handler passed to async_connect (ensuring compatibility
with coroutines). To allow unit tests that don't need a network, the Logic
class is now templated on the Checker type. Currently the Manager provides its
own io_service. However, this can easily be changed so that the io_service is
provided upon construction.

Summary
* Remove unused SiteFiles dependency injection
* Remove Callback and update signatures for public APIs
* Remove obsolete functions
* Move timer to overlay
* Steps toward a shared io_service
* Templated, simplified Checker
* Tidy up Checker declaration
2014-10-10 15:04:37 -07:00
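
The callback elimination works roughly as follows (schematic only; the names are invented): a PeerFinder call returns a value describing what the caller should do, so the overlay is never re-entered while PeerFinder holds its mutex.

    #include <string>
    #include <vector>

    // Hypothetical result type: the caller, not PeerFinder, performs the action.
    enum class Action { none, connect, disconnect };

    struct Decision
    {
        Action action;
        std::string address;
    };

    // Before: the logic would invoke callback.connect (addr) directly,
    // re-entering OverlayImpl and PeerImp while a recursive mutex was held.
    std::vector<Decision> onTimer()
    {
        return { { Action::connect, "203.0.113.7:51235" } };
    }

    int main()
    {
        // The overlay drives the timer and applies the returned decisions itself.
        for (auto const& d : onTimer())
        {
            if (d.action == Action::connect)
            {
                // open an outgoing connection to d.address
            }
        }
    }
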
Vinnie Falco
5f59282ba1 Clean up Overlay and PeerFinder sources:
* Tidy up identifiers and declarations
* Merge PeerFinder headers into one file
* Merge handout classes and functions into one file
2014-10-10 15:04:36 -07:00
Vinnie Falco
db03ce939c Add pending_handlers 2014-10-10 13:26:08 -07:00
Vinnie Falco
68bcbbb701 Add missing include in beast header 2014-10-10 13:26:08 -07:00
David Schwartz
8bdf7b3983 Remove unused file 2014-10-10 10:27:47 -07:00
Vinnie Falco
4ab427d315 Cleanup: Combine Section and BasicConfig, move to basics 2014-10-09 14:49:10 -07:00
Vinnie Falco
9a0a434dd8 Fix incorrect address in connectivity check report:
The remoteAddress is the address as seen on the socket, which for
incoming connections has a random port chosen by the remote implementation
that is different from the port number used to accept connections by the
remote listening socket. The checkedAddress is the remote address as seen
on the socket, combined with the port advertised in the TMEndpoints message.
This fixes the reporting and metadata associated with addresses tested
for connectivity.

The README has been updated to reflect that uptime is no longer part of
the metadata associated with IP addresses saved for bootstrapping.
2014-10-09 14:48:54 -07:00
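
In other words (illustrative only; the real code uses beast::IP::Endpoint), the address tested for connectivity combines the socket's remote IP with the advertised listening port rather than the socket's ephemeral source port:

    #include <string>

    // Hypothetical endpoint type standing in for beast::IP::Endpoint.
    struct Endpoint
    {
        std::string address;
        unsigned short port;
    };

    int main()
    {
        // As seen on the socket: inbound peers arrive from an ephemeral port.
        Endpoint const remoteAddress { "198.51.100.23", 58122 };

        // Port the peer advertises for incoming connections (TMEndpoints).
        unsigned short const advertisedPort = 51235;

        // Address actually tested for connectivity and recorded in metadata.
        Endpoint const checkedAddress { remoteAddress.address, advertisedPort };

        return checkedAddress.port == advertisedPort ? 0 : 1;
    }
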
Nik Bougalis
33d1dda954 Handle BIGNUM conversion failure 2014-10-06 11:24:42 -07:00
Howard Hinnant
8e9efb4ceb Remove unused transaction code. 2014-10-06 11:18:15 -07:00
Nik Bougalis
8835af11d5 Cleanups and surface reduction:
* Don't use friendship unless needed
* Trim down interfaces
* Make classes feel more like std containers
2014-10-06 11:18:15 -07:00
Nik Bougalis
cfb6b678f1 Remove HashMaps 2014-10-02 14:58:14 -07:00
miguelportilla
365500da98 Create orderbook integration test (RIPD-483) 2014-10-02 14:58:14 -07:00
Miguel Portilla
f14d75e798 Optimize account_lines and account_offers (RIPD-587)
Conflicts:
	src/ripple/app/ledger/Ledger.h
2014-10-02 14:58:14 -07:00
Tom Ritchford
0f71b4a378 Fix most compilation warnings for gcc, clang, release, debug. 2014-10-02 14:58:14 -07:00
JoelKatz
b651e0146d Fix some fee logic: (RIPD-614)
* fee_default sets cost in drops of reference transaction
* Offline signing uses fee_default
* Signing multiplier maximum works correctly
* Fix bugs in load fee track
* Remove dead code, add comments
2014-10-02 14:58:14 -07:00
Tom Ritchford
a0dbbb2d84 Update and sort ErrorCode descriptions 2014-10-02 14:57:31 -07:00
Torrie Fischer
a85fbf69e0 Update rocksdb 2014-10-02 14:57:31 -07:00
Torrie Fischer
92b8c7961b Squashed 'src/rocksdb2/' changes from 37c6740..25888ae
25888ae Merge pull request #329 from fyrz/master
89833e5 Fixed signed-unsigned comparison warning in db_test.cc
fcac705 Fixed compile warning on Mac caused by unused variables.
b3343fd resolution for java build problem introduced by 5ec53f3edf62bec1b690ce12fb21a6c52203f3c8
187b299 ForwardIterator: update prev_key_ only if prefix hasn't changed
5ec53f3 make compaction related options changeable
d122e7b Update INSTALL.md
986dad0 Merge pull request #324 from dalgaaf/wip-da-SCA-20140930
8ee75dc db/memtable.cc: remove unused variable merge_result
0fd8bbc db/db_impl.cc: reduce scope of prefix_initialized
676ff7b compaction_picker.cc: remove check for >=0 for unsigned
e55aea5 document_db.cc: fix assert
d517c83 in_table_factory.cc: use correct format specifier
b140375 ttl/ttl_test.cc: prefer prefix ++operator for non-primitive types
43c789c spatialdb/spatial_db.cc: use !empty() instead of 'size() > 0'
0de452e document_db.cc: pass const parameter by reference
4cc8643 util/ldb_cmd.cc: prefer prefix ++operator for non-primitive types
af8c2b2 util/signal_test.cc: suppress intentional null pointer deref
33580fa db/db_impl.cc: fix object handling, remove double lines
873f135 db_ttl_impl.h: pass func parameter by reference
8558457 ldb_cmd_execute_result.h: perform init in initialization list
063471b table/table_test.cc: pass func parameter by reference
93548ce table/cuckoo_table_reader.cc: pass func parameter by ref
b8b7117 db/version_set.cc: use !empty() instead of 'size() > 0'
8ce050b table/bloom_block.*: pass func parameter by reference
53910dd db_test.cc: pass parameter by reference
68ca534 corruption_test.cc: pass parameter by reference
7506198 cuckoo_table_db_test.cc: add flush after delete
1f96330 Print MB per second compaction throughput separately for reads and writes
ffe3d49 Add an instruction about SSE in INSTALL.md
ee1f3cc Package generation for Ubuntu and CentOS
f0f7955 Fixing comile errors on OS X
99fb613 remove 2 space linter
b2d64a4 Fix linters, second try
747523d Print per column family metrics in db_bench
56ebd40 Fix arc lint (should fix #238)
637f891 Merge pull request #321 from eonnen/master
827e31c Make test use a compatible type in the size checks.
fd5d80d CompactedDB: log using the correct info_log
2faf49d use GetContext to replace callback function pointer
983d2de Add AUTHORS file. Fix #203
abd70c5 Merge pull request #316 from fyrz/ReverseBytewiseComparator
2dc6f62 handle kDelete type in cuckoo builder
8b8011a Changed name of ReverseBytewiseComparator based on review comment
389edb6 universal compaction picker: use double for potential overflow
5340484 Built-in comparator(s) in RocksJava
d439451 delay initialization of cuckoo table iterator
94997ea reduce memory usage of cuckoo table builder
c627595 improve memory efficiency of cuckoo reader
581442d option to choose module when calculating CuckooTable hash
fbd2daf CompactedDBImpl::MultiGet() for better CuckooTable performance
3c68006 CompactedDBImpl
f7375f3 Fix double deletes
21ddcf6 Remove allow_thread_local
fb4a492 Merge pull request #311 from ankgup87/master
611e286 Merge branch 'master' of https://github.com/facebook/rocksdb
0103b44 Merge branch 'master' of ssh://github.com/ankgup87/rocksdb
1dfb7bb Add block based table config options
cdaf44f Enlarge log size cap when printing file summary
7cc1ed7 Merge pull request #309 from naveenatceg/staticbuild
ba6d660 Resolving merge conflict
51eeaf6 Addressing review comments
fd7d3fe Addressing review comments (adding a env variable to override temp directory)
cf7ace8 Addressing review comments
0a29ce5 re-enable BlockBasedTable::SetupForCompaction()
55af370 Remove TODO for checking index checksums
3d74f09 Fix compile
53b0039 Fix release compile
d0de413 WriteBatchWithIndex to allow different Comparators for different column families
57a32f1 change target_file_size_base to uint64_t
5e6aee4 dont create backup_input if compaction filter v2 is not used
49b5f94 Merge pull request #306 from Liuchang0812/fix_cast
787cb4d remove cast, replace %llu with % PRIu64
a7574d4 Update logging.cc
7e0dcb9 Update logging.cc
57fa3cc Merge pull request #304 from Liuchang0812/fix-check
cd44522 Merge pull request #305 from Liuchang0812/fix-logging
6a031b6 remove unused variable
4436f17 fixed #303: replace %ld with % PRId64
7a1bd05 Merge pull request #302 from ankgup87/master
423e52c Merge branch 'master' of https://github.com/facebook/rocksdb
bfeef94 Add rate limiter
32f2532 Print compression_size_percent as a signed int
976caca Skip AllocateTest if fallocate() is not supported in the file system
3b897cd Enable no-fbcode RocksDB build
f445947 RocksDB: Format uint64 using PRIu64 in db_impl.cc
e17bc65 Merge pull request #299 from ankgup87/master
b93797a Fix build
adae3ca [Java] Fix JNI link error caused by the removal of options.db_stats_log_interval
90b8c07 Fix unit tests errors
51af7c3 CuckooTable: add one option to allow identity function for the first hash function
0350435 Fixed a signed-unsigned comparison in spatial_db.cc -- issue #293
2fb1fea Fix syncronization issues
ff76895 Remove some unnecessary constructors
feadb9d fix cuckoo table builder test
3c232e1 Fix mac compile
54cada9 Run make format on PR #249
27b22f1 Merge pull request #249 from tdfischer/decompression-refactoring
fb6456b Replace naked calls to operator new and delete (Fixes #222)
5600c8f cuckoo table: return estimated size - 1
a062e1f SetOptions() for memtable related options
e4eca6a Options conversion function for convenience
a7c2094 Merge pull request #292 from saghmrossi/master
4d05234 Merge branch 'master' of github.com:saghmrossi/rocksdb
60a4aa1 Test use_mmap_reads
94e43a1 [Java] Fixed 32-bit overflowing issue when converting jlong to size_t
f9eaaa6 added include for inttypes.h to fix nonworking printf statements
f090575 Replaced "built on on earlier work" by "built on earlier work" in README.md
faad439 Fix #284
49aacd8 Fix make install
acb9348 [Java] Include WriteBatch into RocksDBSample.java, fix how DbBenchmark.java handles WriteBatch.
4a27a2f Don't sync manifest when disableDataSync = true
9b8480d Merge pull request #287 from yinqiwen/rate-limiter-crash-fix
28be16b fix rate limiter crash #286
04ce1b2 Fix #284
add22e3 standardize scripts to run RocksDB benchmarks
dee91c2 WriteThread
540a257 Fix WAL synced
24f034b Merge pull request #282 from Chilledheart/develop
49fe329 Fix build issue under macosx
ebb5c65 Add make install
0352a9f add_wrapped_bloom_test
9c0e66c Don't run background jobs (flush, compactions) when bg_error_ is set
a9639bd Fix valgrind test
d1f24dc Relax FlushSchedule test
3d9e6f7 Push model for flushing memtables
059e584 [unit test] CompactRange should fail if we don't have space
dd641b2 fix RocksDB java build
53404d9 add_qps_info_in cache bench
a52cecb Fix Mac compile
092f97e Fix comments and typos
6cc1286 Added a few statistics for BackupableDB
0a42295 Fix SimpleWriteTimeoutTest
06d9862 Always pass MergeContext as pointer, not reference
d343c3f Improve db recovery
6bb7e3e Merger test
88841bd Explicitly cast char to signed char in Hash()
5231146 MemTableOptions
1d284db Addressing review comments
55114e7 Some updates for SpatialDB
171d4ff remove TailingIterator reference in db_impl.h
9b0f7ff rename version_set options_ to db_options_ to avoid confusion
2d57828 Check stop level trigger-0 before slowdown level-0 trigger
659d2d5 move compaction_filter to immutable_options
048560a reduce references to cfd->options() in DBImpl
011241b DB::Flush() Do not wait for background threads when there is nothing in mem table
a2bb7c3 Push- instead of pull-model for managing Write stalls
0af157f Implement full filter for block based table.
9360cc6 Fix valgrind issue
02d5bff Merge pull request #277 from wankai/master
88a2f44 fix comments
7c16e39 Merge pull request #276 from wankai/master
8237738 replace hard-coded number with named variable
db8ca52 Merge pull request #273 from nbougalis/static-analysis
b7b031f Merge pull request #274 from wankai/master
4c2b1f0 Merge remote-tracking branch 'upstream/master'
a5d2863 typo improvement
9f8aa09 Don't leak data returned by opendir
d1cfb71 Remove unused member(s)
bfee319 sizeof(int*) where sizeof(int) was intended
d40c1f7 Add missing break statement
2e97c38 Avoid off-by-one error when using readlink
40ddc3d add cache bench
9f1c80b Drop column family from write thread
8de151b Add db_bench with lots of column families to regression tests
c9e419c rename options_ to db_options_ in DBImpl to avoid confusion
5cd0576 Fix compaction bug in Cuckoo Table Builder. Use kvs_.size() instead of num_entries in FileSize() method.
0fbb3fa fixed memory leak in unit test DBIteratorBoundTest
adcd253 fix asan check
4092b7a Merge pull request #272 from project-zerus/patch-1
bb6ae0f fix more compile warnings
6d31441 Merge pull request #271 from nbougalis/cleanups
0cd0ec4 Plug memory leak during index creation
4329d74 Fix swapped variable names to accurately reflect usage
45a5e3e Remove path with arena==nullptr from NewInternalIterator
5665e5e introduce ImmutableOptions
e0b99d4 created a new ReadOptions parameter 'iterate_upper_bound'
51ea889 Fix travis builds
a481626 Relax backupable rate limiting test
f7f973d Merge pull request #269 from huahang/patch-2
ef5b384 fix a few compile warnings
2fd3806 Merge pull request #263 from wankai/master
1785114 delete unused Comparator
1b1d961 update HISTORY.md
703c3ea comments about the BlockBasedTableOptions migration in Options
4b5ad88 Merge pull request #260 from wankai/master
19cc588 change to filter_block std::unique_ptr support RAII
9b976e3 Merge pull request #259 from wankai/master
5d25a46 Merge remote-tracking branch 'upstream/master'
9b58c73 call SanitizeDBOptionsByCFOptions() in the right place
a84234a Ignore missing column families
8ed70fc add assert to db Put in db_stress test
7f19bb9 Merge pull request #242 from tdfischer/perf-timer-destructors
8438a19 fix dropping column family bug
6614a48 Refactor PerfStepTimer to stop on destruct
076bd01 Fix compile
990df99 Fix ios compile
7dcadb1 Don't let flush preempt compaction in certain cases
dff2b1a typo improvement
985a31c Merge pull request #251 from nbougalis/master
f09329c Fix candidate file comparison when using path ids
7e9f28c limit max bytes that can be read/written per pread/write syscall
d20b8cf Improve Cuckoo Table Reader performance. Inlined hash function and number of buckets a power of two.
0f9c43e ForwardIterator: reset incomplete iterators on Seek()
722d80c reduce recordTick overhead in compaction loop
22a0a60 Merge pull request #250 from wankai/master
be25ee4 delete unused struct Options
0c26e76 Merge pull request #237 from tdfischer/tdfischer/faster-timeout-test
1d23b5c remove_internal_filter_policy
2a8faf7 Compact SpatialDB as we go, not at the end
7f71448 Implementing a cache friendly version of Cuckoo Hash
d977e55 Don't let other compactions run when manual compaction runs
d5bd6c7 Fix ios compile
6b46f78 Merge pull request #248 from wankai/master
528a11c Update block_builder.h
536e997 Remove assert in vector rep
4142a3e Adding a user comparator for comparing Uint64 slices.
1913ce2 more concurrent flushes in SpatialDB
808e809 Adjust SpatialDB column family options
0c39f54 Use Vector memtable when bulk loading SpatialDB
b6fd781 Don't do memtable lookup in db_impl_readonly if memtables are empty while opening db.
9dcb75b Add is-file-deletions-enabled property
1755581 improve OptimizeForPointLookup()
d9c0785 Fix assertion in PosixRandomAccessFile
bda6f33 fix valgrind error in c_test caused by BlockBasedTableOptions
0db6b02 Update timeout to 50ms instead of 3.
ff6ec0e Optimize SpatialDB
2386185 ReadOptions.total_order_seek to allow total order seek for block-based table when hash index is enabled
a98badf print table options
66f62e5 JNI changes corresponding to BlockBasedTableOptions migration
3844001 move block based table related options BlockBasedTableOptions
17b54ae Merge pull request #243 from andybons/patch-1
0508691 Add missing include to use std::unique_ptr
42ea795 Fix concurrency issue in CompactionPicker
bb530c0 Merge pull request #240 from ShaoYuZhang/master
f76eda7 Fix compilation issue on OSX
08be7f5 Implement Prepare method in CuckooTableReader
47b452c Fix the error of c_test.c
562b7a1 Add missing implementaiton of SanitizeDBOptions in simple_table_db_test.cc
63a2215 Improve Options sanitization and add MmapReadRequired() to TableFactory
e173bf9 Eliminate VersionSet memory leak
10720a5 Revert the unintended change that DestroyDB() doesn't clean up info logs.
01cbdd2 Optimize storage parameters for spatialDB
045575a Add CuckooHash table format to table_reader_bench
7c5173d test: db: fix test to have a smaller timeout for when it runs on faster hardware
6929b08 Remove BitStream* tests
50b790c Removing BitStream* functions
162b815 Adding Column Family support in db_bench.
28b5c76 WriteBatchWithIndex: a wrapper of WriteBatch, with a searchable index
5585e00 Update release note of 3.4
343e98a Reverting import change
ddb8039 RocksDB static build Make file changes to download and build the dependencies .Load the shared library when RocksDB is initialized
68eed8c Bump up version
36e759d Adding Cuckoo Table SST option to db_bench
a6fd14c Fix valgrind error in c_test
c703715 attempt to fix auto_roll_logger_test
c8ecfae Merge pull request #230 from cockroachdb/spencerkimball/send-user-keys-to-v2-filter
570ba5a Avoid retrying to read property block from a table when it does not exist.
625b9ef Merge pull request #234 from bbiao/master
59a2763 Fix typo huage => huge
f611935 Fix autovector iterator increment/decrement comments
58b0f9d Support purging logs from separate log directory
2da53b1 [Java] Add purgeOldBackups API
6c4c159 fix_sst_dump_for_old_sst_format
8dfe2fd fix compile error under Mac OS X
58c4946 Allow env_posix to lower background thread IO priority
6a2be31 fix_valgrind_error_caused_in_db_info_dummper
e91ebf1 print compaction_filter name in Options.Dump
5a5953b Add histogram for DB_SEEK
5e64240 log db path info before open
0c9dc9f Remove malloc from FormatFileNumber
bcefede Update HISTORY.md
4808177 Revert "Include candidate files under options.db_log_dir in FindObsoleteFiles()"
0138b8e Fixed compile errors (signed / unsigned comparison) in cuckoo_table_db_test on Mac
1562653 Fixed a signed-unsigned comparison error in db_test
218857b remove tailing_iter.h/cc
5d0074c set bytes_per_sync to 1MB if rate limiter is enabled
3fcf7b2 Pass parsed user key to prefix extractor in V2 compaction
2fa6434 Add scope guard
06a52bd Flush only one column family
9674c11 Integrating Cuckoo Hash SST Table format into RocksDB

git-subtree-dir: src/rocksdb2
git-subtree-split: 25888ae0068c9b8e3d9421ea8c78a7be339298d8
2014-10-02 10:47:26 -07:00
Torrie Fischer
225f8ac12f Merge commit '92b8c7961b433d12d9d77da5d61c26a920bbd370' into updated-rocksdb 2014-10-02 10:47:26 -07:00
Howard Hinnant
1161511207 Fix two Wunused-private-field warnings. 2014-10-01 08:47:56 -07:00
Nicholas Dudfield
ca8eda412e Make travis build and use debug variants for tests 2014-10-01 08:47:56 -07:00
Mark Travis
ec4ec48fb8 Add counters to track nodestore read and write activities. 2014-10-01 08:47:56 -07:00
Nik Bougalis
c0b69e8ef7 Remove the use of beast::String from rippled (RIPD-443) 2014-10-01 08:47:55 -07:00
Tom Ritchford
4241dbb600 Clean and harden Transaction.
* Replace boolean parameter with enumerated type.
* Get rid of std::ref.
* 80-column cleanups.
* Replace a std::bind with a lambda.
2014-10-01 08:47:55 -07:00
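
The first bullet is the usual readability change; a tiny illustration (names invented, not the Transaction interface):

    // Before: transaction.load (/*validated=*/ true);  -- opaque at the call site.
    enum class Validate { no, yes };

    void load (Validate v)
    {
        (void) v;   // a real implementation would branch on the value
    }

    int main()
    {
        load (Validate::yes);   // self-documenting call site
    }
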
Tom Ritchford
f54280aaad New DatabaseReader reads ledger numbers from database. 2014-10-01 08:47:55 -07:00
Tom Ritchford
6069400538 Fix compiler warnings under gcc. 2014-10-01 08:47:55 -07:00
Howard Hinnant
616be1d76c Miscellaneous cleanups:
* Limit HashPrefix construction and disallow assignment

* Give KnownFormats deleted copy members so that derived
  classes will give the right answers if queried with the
  std::is_copy_constructible/assignable traits.

* Replace SharedSingleton with a local static in
  LedgerFormats::getInstance() to be consistent with
  similar code in other places.  This also allows the
  LedgerFormats default constructor to be marked private
  so that the compiler enforces the design that
  LedgerFormats is a singleton type.

* Change return types of LedgerFormats::getInstance() and
  TxFormats::getInstance() from pointer to non-const to
  reference to const so as follow more established design
  guidelines for singletons.  This prevents pointers being
  mistaken for heap-allocated objects, and the const
  ensures the singleton isn't mutable.

* Change RippleAddress to inherit privately from
  CBase58Data instead of publicly.  This lets the compiler
  enforce that there are no unintended conversions from
  RippleAddress to CBase58Data.  This change allows us
  to remove a comment warning about unwanted conversions.
2014-10-01 08:47:54 -07:00
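
The getInstance() change described above is the familiar function-local-static pattern; a simplified sketch (not the real LedgerFormats class):

    class LedgerFormatsLike
    {
        LedgerFormatsLike() = default;              // only getInstance may construct

    public:
        LedgerFormatsLike (LedgerFormatsLike const&) = delete;
        LedgerFormatsLike& operator= (LedgerFormatsLike const&) = delete;

        // Reference to const: cannot be mistaken for a heap-allocated object,
        // and callers cannot mutate the singleton.
        static LedgerFormatsLike const& getInstance()
        {
            static LedgerFormatsLike instance;      // constructed on first use
            return instance;
        }
    };

    int main()
    {
        auto const& formats = LedgerFormatsLike::getInstance();
        (void) formats;
    }
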
Nik Bougalis
8e91ce67c5 Allow beast::lexicalCast to parse 'true' & 'false' into a bool 2014-10-01 08:47:54 -07:00
JoelKatz
c1ecd661c3 Fix broken assert in built/validated ledger mismatch handler 2014-10-01 08:47:54 -07:00
JoelKatz
b27e2aad07 Improve transaction security
* Check signatures of every transaction on every validator
* Remove obsolete code
* Check transaction status in submit/sign RPC handler
2014-10-01 08:47:54 -07:00
MarkusTeufelberger
5ce508e09d Change output range names of ledger_cleaner
The input parameters are called "min_ledger" and "max_ledger"; they are also called "minRange" and "maxRange" in the code, but are printed as "ledger_min" and "ledger_max". This is inconsistent and should be changed, as it might lead to confusion about how to call this module via RPC.
2014-10-01 08:47:53 -07:00
Nik Bougalis
3cfa5a41b1 Improve BuildInfo interface:
* Remove unnecessary beast::String dependency
* Explicitly cast to result type while packing a version
* Add unit tests for version formatting
2014-10-01 08:47:53 -07:00
Vinnie Falco
6c072f37ef Remove unused testoverlay module 2014-10-01 08:47:53 -07:00
Nik Bougalis
dbd993ed2b Use namespaces instead of static-only classes 2014-10-01 08:47:52 -07:00
Nik Bougalis
45b5c4ba7a Use deleted members to prevent copying in Beast (RIPD-268) 2014-10-01 08:47:52 -07:00
Nik Bougalis
7933e5d1f9 Use deleted members to prevent copying in rippled (RIPD-268) 2014-10-01 06:28:32 -07:00
Vinnie Falco
01e52e6f9f Use trusted validators median fee
Conflicts:
	src/ripple/app/misc/Validations.cpp
2014-10-01 06:28:32 -07:00
Vinnie Falco
40a955e192 Consume handshake data in HTTP/S server 2014-10-01 06:28:12 -07:00
Vinnie Falco
a8296f7301 Set version to 0.26.3-sp4 2014-09-30 18:04:59 -07:00
Vinnie Falco
590c3b876b Use trusted validators median fee 2014-09-30 18:03:53 -07:00
Vinnie Falco
6dfc805eaa Rewrite HTTP/S server to use coroutines:
* Fix bug with more than one complete request in a read buffer
* Use stackful coroutines for simplified control flow
* Door refactored to detect handshakes
* Remove dependency on MultiSocket
* Remove dependency on handshake detect logic framework
2014-09-30 13:29:32 -07:00
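
The control-flow simplification from stackful coroutines looks roughly like this (a sketch using boost::asio::spawn, not the actual rippled server): reads that would otherwise be chained through completion handlers become a plain loop, which also makes it easy to keep consuming when one read buffer holds more than one complete request.

    #include <boost/asio.hpp>
    #include <boost/asio/spawn.hpp>
    #include <array>
    #include <utility>

    namespace asio = boost::asio;

    void serve (asio::ip::tcp::socket socket, asio::yield_context yield)
    {
        std::array<char, 4096> buf;
        boost::system::error_code ec;
        for (;;)
        {
            // Suspends the coroutine instead of requiring a completion handler.
            auto const n = socket.async_read_some (asio::buffer (buf), yield[ec]);
            if (ec)
                break;          // includes a clean shutdown by the peer
            (void) n;           // parse request(s) and write the response here
        }
    }

    int main()
    {
        asio::io_service ios;
        asio::ip::tcp::socket socket (ios);
        asio::spawn (ios, [&] (asio::yield_context yield)
        {
            serve (std::move (socket), yield);
        });
        ios.run();
    }
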
Vinnie Falco
5ce6068df5 Remove obsolete SharedArg 2014-09-29 07:18:51 -07:00
Nik Bougalis
bf9b8f4d1b Use secure RPC connections when configured 2014-09-28 04:39:49 -07:00
Vinnie Falco
d618581060 Config improvements:
* More fine-grained Section mutators
* Add remap for mapping legacy single sections to key value pairs
* Add output stream operators for BasicConfig and Section
* Allow section values to be overwritten from command line
* Update rpc key/value configs from command line
* Add RPC::Setup with defaults and remap legacy rpc sections
2014-09-28 04:39:49 -07:00
David Schwartz
2936bbfae8 Make path filtering smarter (RIPD-561)
* Break path liquidity checking into its own function
* Measure initial quality over minimum destination amount
* Test for available liquidity
2014-09-24 11:54:12 -07:00
Nik Bougalis
47b08bfc02 Add --quorum command line argument (RIPD-563) 2014-09-24 11:19:39 -07:00
Nik Bougalis
da4f77ca1f Return correct error message for invalid fields 2014-09-24 11:19:38 -07:00
David Schwartz
1c0a75d467 Distinguish Byzantine failure from tx bug (RIPD-523) 2014-09-24 11:19:38 -07:00
Nik Bougalis
659cf0c221 Decouple LedgerMaster from configuration 2014-09-24 11:19:38 -07:00
Howard Hinnant
430229fd84 Mark several Ledger member functions as const. 2014-09-24 11:19:37 -07:00
Howard Hinnant
81699a0971 Add +DEBUG to the raw version string for DEBUG builds.
This will show up in the rpc server_info command.
There is no impact on the version string for release builds.
2014-09-24 11:19:37 -07:00
MarkusTeufelberger
c54aff74b3 Build gcc.debug using -Og flag
Since gcc 4.8 is required anyway, it might be nice to use its features.

Intro to feature (second bullet point):
https://gcc.gnu.org/gcc-4.8/changes.html

-g (line 328) is still needed:
http://stackoverflow.com/questions/12970596/gcc-4-8-does-og-imply-g
2014-09-24 11:19:37 -07:00
Mark Travis
7f43ab9097 Improvements to SConstruct:
* Default target is release instead of debug (scons with no arguments).
* All targets now include debug symbols, including release.
Rationale: "out of the box" builds of rippled using plain "scons" or "scons -j4" will produce
a debug instead of a release build, which could underperform.
2014-09-24 11:19:36 -07:00
Miguel Portilla
d78f740250 Add account_offers paging (RIPD-344) 2014-09-19 16:38:10 -07:00
Miguel Portilla
cd1bd18a49 Add account_lines paging (RIPD-343) 2014-09-19 16:18:50 -07:00
sublimator
f81b084448 Set page sizes for ledger_data correctly (RIPD-249) 2014-09-19 16:16:49 -07:00
Vinnie Falco
02d9c77402 Set version to 0.26.3-sp2 2014-09-19 11:57:22 -07:00
Howard Hinnant
a0c903c68c Add needed #include <istream>
This is needed for the combination of boost 1.56 and libc++
2014-09-19 10:29:14 -07:00
JoelKatz
6aa325d3da On missing node in consensus, bow out (RIPD-567) 2014-09-18 15:12:45 -07:00
JoelKatz
041f874d4c Improve transaction security
* Check signatures of every transaction on every validator
* Remove obsolete code
* Check transaction status in submit/sign RPC handler
2014-09-18 14:25:09 -07:00
Nik Bougalis
526ecd6a81 Detect invalid inputs during STAmount conversion (RIPD-570):
* More robust validation of input
* XRP may not be specified using fractions
* Prevent creating native amounts larger than max possible value
* Add unit tests to verify correct parsing
2014-09-18 12:46:21 -07:00
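
The native-amount rules reduce to checks along these lines (illustrative only; the real STAmount parser is more involved). Assumption stated for clarity: 1 XRP is 1,000,000 drops and the total supply is 100 billion XRP, so the largest native amount is 1e17 drops.

    #include <cstdint>
    #include <stdexcept>
    #include <string>

    std::uint64_t parseNativeAmount (std::string const& s)
    {
        if (s.find ('.') != std::string::npos)
            throw std::runtime_error ("XRP may not be specified using fractions");

        std::uint64_t const drops = std::stoull (s);
        if (drops > 100000000000000000ull)   // 1e17 drops
            throw std::runtime_error ("amount exceeds the maximum possible value");
        return drops;
    }

    int main()
    {
        return parseNativeAmount ("1000000") == 1000000 ? 0 : 1;
    }
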
Nik Bougalis
d373054fc4 Templatize and improve beast string-to-integer conversions:
* Properly handle numbers at the edge of precision
* Improve and expand unit test coverage
2014-09-18 12:46:16 -07:00
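
Handling "numbers at the edge of precision" typically means checking for overflow before each accumulation step; a minimal sketch of that idea (not the beast::lexicalCast implementation):

    #include <limits>
    #include <string>

    // Returns true and stores the value only if the entire string is a valid,
    // in-range unsigned number for T.
    template <class T>
    bool parseUnsigned (std::string const& s, T& out)
    {
        if (s.empty())
            return false;
        T value = 0;
        for (char const c : s)
        {
            if (c < '0' || c > '9')
                return false;
            T const digit = static_cast<T> (c - '0');
            // value * 10 + digit must not exceed max(); reject before overflowing.
            if (value > (std::numeric_limits<T>::max() - digit) / 10)
                return false;
            value = value * 10 + digit;
        }
        out = value;
        return true;
    }

    int main()
    {
        unsigned long long v = 0;
        bool const ok  = parseUnsigned ("18446744073709551615", v); // max 64-bit value
        bool const bad = parseUnsigned ("18446744073709551616", v); // one past the edge
        return (ok && ! bad) ? 0 : 1;
    }
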
Vinnie Falco
b6d9f1d4b2 Add fee voting configuration and docs (RIPD-564) 2014-09-17 12:22:51 -07:00
Vinnie Falco
3fef916972 Move some constants to core/SystemParameters.h 2014-09-17 12:22:49 -07:00
Vinnie Falco
89a51e5b91 Split Section to its own header and add convenience accessors 2014-09-17 12:22:49 -07:00
miguelportilla
f87a6ccc7a Fix missing includes for boost 1.56.0 2014-09-16 15:22:00 -07:00
Nik Bougalis
f65cea66ef Remove unused macros, config variables, and file 2014-09-16 14:15:13 -07:00
Vinnie Falco
4239880acb Clean up and restructure sources 2014-09-16 14:15:12 -07:00
Vinnie Falco
1dcd06a1c1 Add missing includes and tidy up 2014-09-16 14:03:50 -07:00
Vinnie Falco
0f30191d10 Refactor STAmount:
* Remove unused functions
* Remove unused constructor
* Use delegating constructors
* Mark some observers deprecated
* Clean up declaration parameter names
* Add checked and unchecked constructors
* De-inline unnecessary inlined functions
* Reorder and regroup members into sections
* Move globals from the unity file to the .cpp
* Change some member functions to be free functions
* Put implementation in one .cpp and the test in another .cpp

Remove unused STAmount constructor and delegate two others. No change in functionality.
2014-09-16 07:39:50 -07:00
Vinnie Falco
8fb9d5daaa Set version to 0.26.4-alpha 2014-09-15 18:20:47 -07:00
Nik Bougalis
ed3c942ff1 Inject JobQueue in NetworkOPs 2014-09-15 16:05:01 -07:00
Nik Bougalis
80436d4a8b Cleanups:
* Remove obsolete string formatting function
* Remove unused ADDRESS macro
* Re-scope functions
2014-09-15 16:04:48 -07:00
Howard Hinnant
cfc702c766 Fix beast::http::headers move members 2014-09-15 16:03:36 -07:00
Vinnie Falco
88ae15ea8e Add base64 conversions and tests 2014-09-15 14:52:42 -07:00
Vinnie Falco
6bafca7386 Use transform_iterator in http::headers 2014-09-15 14:52:42 -07:00
Vinnie Falco
379e842080 Add BasicConfig simplified config interface 2014-09-15 12:46:04 -07:00
Vinnie Falco
c41ce469d0 Cleanup:
* Move QUALITY_ONE to Quality.h
* Move functional files up one level
* Remove core.h
* Merge routines into Config.cpp
* Rename Section to IniFileSections
* Rename IniFileSections routines
2014-09-15 12:21:36 -07:00
Vinnie Falco
a1ca68473d Merge branch 'release' into develop 2014-09-14 15:39:09 -07:00
Nik Bougalis
3345d03433 Avoid conversions whenever possible during RippleState lookups 2014-09-13 11:06:38 -07:00
Nik Bougalis
81a426608a Make log partitions case-insensitive 2014-09-13 11:06:19 -07:00
Vinnie Falco
2ad6f0a65e Set version to 0.26.3-sp1 2014-09-12 15:22:54 -07:00
Vinnie Falco
ee8bd8ddae Fix handling of HTTP/S keep-alives (RIPD-556):
* Proper shutdown for ssl and non-ssl connections
* Report session id in history
* Report histogram of requests per session
* Change print name to 'http'
* Split logging into "HTTP" and "HTTP-RPC" partitions
* More logging and refinement of logging severities
* Log the request count when a session is destroyed

Conflicts:
	src/ripple/http/impl/Peer.cpp
	src/ripple/http/impl/Peer.h
	src/ripple/http/impl/ServerImpl.cpp
	src/ripple/module/app/main/Application.cpp
	src/ripple/module/app/main/RPCHTTPServer.cpp
2014-09-12 15:19:17 -07:00
Vinnie Falco
319ac14e7d Add is_short_read() 2014-09-12 15:19:04 -07:00
Vinnie Falco
0215a7400d Fix handling of HTTP/S keep-alives (RIPD-556):
* Proper shutdown for ssl and non-ssl connections
* Report session id in history
* Report histogram of requests per session
* Change print name to 'http'
* Split logging into "HTTP" and "HTTP-RPC" partitions
* More logging and refinement of logging severities
* Log the request count when a session is destroyed
2014-09-12 14:20:30 -07:00
Vinnie Falco
79db0ca7a6 Add is_short_read() 2014-09-12 14:10:33 -07:00
JoelKatz
1a7eafb699 Add ledger cleaner documentation (RIPD-555) 2014-09-09 22:33:42 -07:00
Nik Bougalis
81a06ea6cd Cleanups:
* Remove obsolete config variables
* Reduce coupling
* Use C++11 ownership containers
* Use auto when it makes sense
* Detect edge-case in unit tests
* Reduce the number of LedgerEntrySet public members
2014-09-09 22:33:42 -07:00
Nik Bougalis
de4be649ab Refactor string-to-integer conversions 2014-09-09 21:38:09 -07:00
sublimator
d90ec5f06c Normalize sort paths in Visual Studio project generator 2014-09-08 11:17:40 -07:00
Vinnie Falco
32065ced6e Add peer count to HTTP server properties 2014-09-08 11:17:39 -07:00
Scott Schurr
b5224a2227 Improve regularity of STObject and STArray (RIPD-448, RIPD-544):
* reduce duplicated code using templates
* replace BOOST_FOREACH with C++11 for loops
* remove most direct calls to new
* limit line length to 80 characters
* clearly identify virtual and overridden methods
* split STObject and STArray into their own files
* name files after the class they contain
2014-09-05 13:02:07 -07:00
Nik Bougalis
c55777738f Refactor LedgerEntrySet:
* Split adjustOwnerCount to increment and decrement paths.
* Move pathfinding-specific functions out of LedgerEntrySet
* Convert members to free functions
2014-09-05 11:50:17 -07:00
JoelKatz
c72dff5a24 Make more RocksDB tunables
Add support for universal compaction
2014-09-05 11:50:17 -07:00
JoelKatz
6b09e49c08 Increase the size of the tree cache:
This change will not significantly increase memory consumption
because most entries are pinned anyway.
2014-09-05 11:48:00 -07:00
Nik Bougalis
413218c4c4 Create the directory for the debug_logfile (RIPD-551) 2014-09-04 16:51:31 -07:00
Miguel Portilla
16c04b50ee Add date to tx command (RIPD-542) 2014-09-04 16:51:31 -07:00
Nik Bougalis
56c18f7768 Cleanups and fixes (RIPD-532):
* Properly handle sfWalletLocator field
* Plug a tiny memory leak
* Avoid naked pointers
* Remove unused variables
* Other small cleanups
2014-09-04 16:51:31 -07:00
Tom Ritchford
22ca13bc78 Cleanups to RPC code 2014-09-04 16:51:31 -07:00
Nicholas Dudfield
4c7fd18230 Ticket integration tests 2014-09-04 16:51:31 -07:00
Nik Bougalis
39730fc13e Ticket issuing (RIPD-368):
* New CreateTicket transactor to create tickets
* New CancelTicket transactor to cancel tickets
* Ledger entries for tickets & associated functions
* First draft of M-of-N documentation
2014-09-04 16:11:44 -07:00
Nik Bougalis
889c0a0d0f Transactor refactor:
* Allocate transactors on the stack instead of the heap.
* Remove header files and reduce transactor public interface.
2014-09-04 16:11:44 -07:00
Nik Bougalis
624a803955 Handle whitespace separating an 'ip port' correctly (RIPD-552) 2014-09-04 12:26:27 -07:00
Vinnie Falco
9f5c21f80e Set version to 0.26.3 2014-09-03 16:15:07 -07:00
Nik Bougalis
a3fe089367 Fix missing return value error check 2014-09-02 08:45:19 -07:00
Nik Bougalis
85d5cd3118 Fix missing return value error check 2014-09-01 21:11:31 -07:00
Vinnie Falco
61006e626d Also report mismatched built ledger 2014-08-28 18:03:50 -07:00
Nik Bougalis
15aad1cb24 Optimize pathfinding operations (RIPD-537):
* Calculate and cache Account hashes without holding locks.
* Fast hash-based path element comparison.
* Use emplace instead of find/insert
2014-08-28 15:57:29 -07:00
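
On the emplace point above, a minimal generic sketch (not the rippled code) of why emplace is preferred over a separate find/insert pair: emplace does a single lookup and constructs the element in place.

    #include <map>
    #include <string>

    void addEntry(std::map<std::string, int>& table)
    {
        // find-then-insert walks the container twice and builds a temporary pair
        if (table.find("alice") == table.end())
            table.insert(std::make_pair(std::string("alice"), 1));

        // emplace performs one lookup and constructs the value in place
        table.emplace("bob", 1);
    }
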
Tom Ritchford
95c1c5f54e Stream generated JSON. 2014-08-28 12:38:21 -07:00
Vinnie Falco
c65fb91878 Fix special members for http classes 2014-08-28 12:38:03 -07:00
Vinnie Falco
d5a7e1331e Set version to 0.26.3-rc3 2014-08-27 15:58:41 -07:00
Vinnie Falco
04bcd93ba3 HTTP(S)-RPC server improvements (RIPD-489, RIPD-533):
* Correct handling of Keep-Alive in socket handlers
* Report session history in print command
2014-08-27 18:06:30 -04:00
Vinnie Falco
f97ef7039a HTTP message and parser improvements:
* streambuf wrapper supports rvalue move
* message class holds a complete HTTP message
* body class holds the HTTP content body
* headers class holds RFC-compliant HTTP headers
* basic_parser provides class interface to joyent's http-parser
* parser class parses into a message object
* Remove unused http get client free function
* unit test for parsing malformed messages
2014-08-27 18:06:30 -04:00
Tom Ritchford
9160b46c1e Bug fixes and new features for LedgerTool:
* Fix RIPD-509, RIPD-514, RIPD-519, RIPD-525, RIPD-527, RIPD-529,
  RIPD-530 and RIPD-531.
* Protect people from ledger-spew and remove cruft.
* Better error messages and handling.
* Cache command lists or clears ledger cache.
* Better ledger summaries.
* Offline mode.
2014-08-27 18:05:44 -04:00
Edward Hennis
aa4b116498 Wrap RippleCalc into a single function (RIPD-500):
* Change public members of Input and Output to remove trailing _.
* Remove Input constructor and separate flags in RippleCalc to reduce
  duplication and confusion.
* Make calculation result private; add getter.
* Narrow scope of some of the results of calls to rippleCalculate.
2014-08-27 18:05:30 -04:00
Edward Hennis
612bb71165 Add enable_if_lvalue 2014-08-27 17:10:24 -04:00
Torrie Fischer
5c67f99ef9 Remove old rocksdb/ 2014-08-27 12:37:13 -07:00
Torrie Fischer
101a4808a0 Update includes and scons 2014-08-27 12:37:05 -07:00
Torrie Fischer
1d38671f5e Squashed 'src/rocksdb2/' content from commit 37c6740
git-subtree-dir: src/rocksdb2
git-subtree-split: 37c6740c383bb9a6ee2747b04f08bc77fcfa10c5
2014-08-27 12:36:50 -07:00
Torrie Fischer
7f25d88f02 Merge commit '1d38671f5edc2322bc58417816674cc629ae7a70' as 'src/rocksdb2' 2014-08-27 12:36:50 -07:00
Vinnie Falco
be830d3dad Set version to 0.26.3-rc2 2014-08-26 10:00:03 -07:00
Nik Bougalis
5bc949d70f Fix a unit test warning 2014-08-26 10:00:03 -07:00
Vinnie Falco
43817bd722 Merge remote-tracking branch 'upstream/release' into tmp 2014-08-26 09:46:29 -07:00
JoelKatz
61623d6d75 Improve parallelization of getRippleLines 2014-08-26 09:28:10 -07:00
David Schwartz
9aad60f56d Make sure we update mTNByID when we replace the root 2014-08-26 09:28:09 -07:00
Vinnie Falco
e7cf3e8084 Add ledger.history.mismatch insight statistic 2014-08-26 09:28:07 -07:00
Tom Ritchford
e024e7c2ec Compile git tags now include name and branch. (RIPD-493) 2014-08-22 18:10:17 -04:00
Edward Hennis
6fc136ae9a Update npm tests & config to pass in Windows. (RIPD-209)
* Extend timeout for WebSocket test.
* Windows networking doesn't like connecting to 0.0.0.0. Use 127.0.0.1
* Additional command line options in config. Can potentially be used to run rippled in a debugger.
2014-08-22 18:10:17 -04:00
Edward Hennis
2b69ded1ea Convert rvalue to an lvalue. (RIPD-494)
* The rvalue gets destructed as soon as "rc" is constructed.
2014-08-22 18:10:17 -04:00
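
A generic sketch of the lifetime issue described above (the actual code and the "rc" variable are not shown in this log): keeping a pointer or reference tied to a temporary leaves it dangling once the temporary is destroyed, while binding the rvalue to a named lvalue first keeps it alive.

    #include <string>

    std::string makeName() { return "example"; }   // returns by value (an rvalue at the call site)

    void broken()
    {
        const char* rc = makeName().c_str();   // temporary string destroyed at end of the statement
        (void)rc;                              // rc now dangles; using it is undefined behaviour
    }

    void fixed()
    {
        std::string name = makeName();         // convert the rvalue to a named lvalue
        const char* rc = name.c_str();         // valid for as long as 'name' lives
        (void)rc;
    }
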
Tom Ritchford
8dd799aa6f New command line LedgerTool. (RIPD-243)
* Retrieve and process summary or full ledgers.
* Search using arbitrary criteria (any Python function).
* Search using arbitrary formats (any Python function).
* Caches ledgers as .gz files to avoid repeated server requests.
* Handles ledger numbers, ranges, and special names like validated or closed.
2014-08-22 18:10:11 -04:00
Vinnie Falco
7230ef41ee Fix warnings and compile errors 2014-08-20 17:44:00 -07:00
Edward Hennis
a86f0a743c Clean up some docs
* Consistent line endings in rippled-example.cfg
* Rewrite CodingStyle.md. Get down to top priorities
2014-08-20 16:20:15 -07:00
Josh Juran
af75b55ef7 src/README.md: s/addded/added/ 2014-08-20 16:19:28 -07:00
Vinnie Falco
9ecb37dd4f Add validators aged container test 2014-08-20 16:09:52 -07:00
Vinnie Falco
2e3784a914 Tidy up sources 2014-08-20 16:09:51 -07:00
Scott Schurr
019c1af435 Use aged containers in Validators module (RIPD-349) 2014-08-20 16:09:51 -07:00
Vinnie Falco
5322955f2b Fix exception safety in aged containers 2014-08-20 16:09:50 -07:00
Howard Hinnant
a8ea4ce283 Fix move constructor of aged_unordered_container (RIPD-490) 2014-08-20 16:08:59 -07:00
Mark Travis
c12862f60d Enable heap profiling with jemalloc:
The jemalloc library (which must be downloaded and installed separately)
is required to perform heap profiling. Instructions on how to enable heap
profiling with rippled are available in doc/HeapProfiling.md
2014-08-13 20:36:38 -07:00
Tom Ritchford
e889183fc5 JSON cleanups:
* Fix Json headers to include what they use.
* Get rid of ripple/json/api directory.
2014-08-13 20:36:36 -07:00
Jeff Trull
7be695c6bd Handle changes to boost::optional in newly released boost 1.56:
To improve compatibility with the proposed std::optional a number
of changes were made, one of which is the removal of the implicit
conversion to bool.  As a result, returning boost::optional as a
bool value now fails.  Explicit conversion to bool used for clarity.
2014-08-13 19:00:17 -07:00
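
A minimal illustration of the boost::optional change described in this commit (lookup() is a made-up example, not rippled code, and assumes the Boost headers are available): with Boost 1.56 the conversion to bool is explicit, so a function returning bool must spell it out.

    #include <boost/optional.hpp>

    boost::optional<int> lookup() { return 42; }

    bool found()
    {
        // return lookup();                  // relied on the implicit conversion; fails with 1.56
        return static_cast<bool>(lookup());  // explicit conversion, as described above
    }
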
Nik Bougalis
956901ae02 Properly handle edge-cases when parsing JSON integers (RIPD-470):
* Properly handle both unsigned and signed integers
* Return parsing error for overlong JSON numbers
* Implement unit test checking the edge cases that are of interest
2014-08-13 18:46:32 -07:00
Nik Bougalis
d562c5b2d5 Account for high-ascii (RIPD-464) 2014-08-13 18:46:32 -07:00
Nik Bougalis
d7b054c3f6 When compiling debug builds with GCC use _FORTIFY_SOURCE=2:
This gcc/glibc feature adds some (supposedly) lightweight checks which can
help detect errors such as buffer overflows.
2014-08-13 18:33:30 -07:00
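
A small, hypothetical example (not from the repository) of the kind of error this catches: with glibc, _FORTIFY_SOURCE=2, and some optimization enabled, the fortified strcpy knows the destination size and aborts at run time instead of silently overflowing the buffer.

    #include <cstring>

    int main()
    {
        char buf[8];
        // longer than buf: the fortified strcpy can detect this and abort
        std::strcpy(buf, "definitely more than eight bytes");
        return 0;
    }
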
Tom Ritchford
901ccad0cf Clarify unfunded offer deletion strategy. 2014-08-13 18:33:03 -07:00
Vinnie Falco
b9454e0f0c Set version to 0.26.2 2014-08-12 12:19:13 -07:00
Vinnie Falco
26181907fc Merge branch 'release-next' into release
Conflicts:
	src/ripple/module/app/paths/cursor/ForwardLiquidity.cpp
	src/ripple/overlay/tests/peer_info.test.cpp
	src/ripple/unity/http.h
2014-08-12 12:19:04 -07:00
Miguel Portilla
8368798ad2 Add owner_funds to subscription streams (RIPD-377) 2014-08-12 11:56:07 -07:00
Miguel Portilla
ed597e5e99 Add owner_funds to subscription streams (RIPD-377) 2014-08-12 11:54:49 -07:00
Miguel Portilla
2c88c15f7f Fix unhandled exception in async HTTP server (RIPD-475) 2014-08-12 11:54:02 -07:00
Miguel Portilla
af7cd3cc04 Fix unhandled exception in async HTTP server (RIPD-475) 2014-08-12 11:46:45 -07:00
Howard Hinnant
9552551f9a Refactor SField (RIPD-431)
* Restrict access to SField constructors.
* Make all SField access const.
* Hide and simplify databases used to hold SField constants.
* Separate the two concerns of representing a field,
  and maintaining a database of fields.
2014-08-08 19:16:51 -07:00
Edward Hennis
1c73a0f649 Remove unused code:
* StringConcat was only being referenced by unit test.
* `runall.sh` is no longer needed. Use `npm test` instead.
2014-08-08 19:16:42 -07:00
Tom Ritchford
e3698b2a07 Fix Pathfinder::getPathsOut to use Issue 2014-08-08 19:16:34 -07:00
Tom Ritchford
c841f8b360 Add the git tag to the compile (RIPD-238) 2014-08-08 19:16:33 -07:00
wltsmrz
50f9b68d61 Bump ledger_wait timeout for Travis 2014-08-08 14:57:39 -07:00
Nik Bougalis
27b48bc16e Refactor beast::SemanticVersion (RIPD-199) 2014-08-08 14:57:39 -07:00
Edward Hennis
bccdbaed2b Update Doxyfile (RIPD-175):
* Clean up the include path to reflect updated code structure.
* Update PROJECT_BRIEF to more accurate title.
* Fix location of logo graphic.
* Hide all undocumented classes.
2014-08-08 14:57:39 -07:00
Nik Bougalis
398095a667 Cleanups and performance optimizations (RIPD-450):
* Remove AccountItems and AccountItem
* Restructure RippleLineCache to not require shared_ptr
* Avoid expensive copies of base_uint<160> in RippleState
2014-08-08 14:57:39 -07:00
Nik Bougalis
80095824b9 Remove obsolete nickname support 2014-08-08 14:57:39 -07:00
Nik Bougalis
d91c1f96cc Detect node store batch write failures (RIPD-270):
* Raise open file limit from the soft max to the hard max.
2014-08-08 14:57:39 -07:00
Howard Hinnant
1c005a0292 Fix macro setting for rocksdb unity file 2014-08-08 14:50:15 -07:00
miguelportilla
c7ced496ac Update rocksdb unity file (RIPD-352) 2014-08-08 15:42:41 -04:00
Vinnie Falco
d4ff18834c Merge commit 'f86d9fd626df1cee55bce4c577d06bb064dc827b' as 'src/rocksdb' 2014-08-08 11:57:41 -07:00
Vinnie Falco
f86d9fd626 Squashed 'src/rocksdb/' content from commit 224932d
git-subtree-dir: src/rocksdb
git-subtree-split: 224932d4d0
2014-08-08 11:57:41 -07:00
Vinnie Falco
854604f724 Remove rocksdb in preparation for subtree add 2014-08-08 11:57:29 -07:00
Vinnie Falco
cfd3642cb1 Set version to 0.26.2-alpha-4 2014-08-07 17:13:55 -07:00
Tom Ritchford
f493590604 Logic fix for multiquality issues. 2014-08-07 17:10:38 -07:00
Vinnie Falco
8c084a3de8 Set version to 0.26.2-alpha-3 2014-08-07 10:13:22 -07:00
Tom Ritchford
985aa803a4 Fix error with multi-quality paths. 2014-08-07 10:12:09 -07:00
Vinnie Falco
9e319d7bd6 Set version to 0.26.2-alpha-2 2014-08-06 10:16:13 -07:00
David Schwartz
a122e176d7 Pathfinding fixes:
* Don't consider global freeze if not enforcing
* Log if a covering path fails to cover
2014-08-06 10:16:12 -07:00
Tom Ritchford
88a6f2931e Fix local variable unfundedOffers that was shadowing a class variable. 2014-08-06 09:26:22 -07:00
Vinnie Falco
54f3a83e25 Fix missing return values in headers_t 2014-08-05 16:43:13 -07:00
Vinnie Falco
0955c0d8d3 Set version to 0.26.2-alpha-1 2014-08-05 15:40:34 -07:00
JoelKatz
6bb5be5216 Avoid a mutex 99+% of the time in SField::getField 2014-08-05 15:36:12 -07:00
JoelKatz
c9cd7e4be0 Rewrite STObject::setType for improved performance 2014-08-05 15:36:05 -07:00
Miguel Portilla
ce2cecf046 Add owner_funds to client subscription data (RIPD-377)
Conflicts:
	src/ripple/module/app/ledger/AcceptedLedger.cpp
2014-08-05 15:04:50 -07:00
Vinnie Falco
6e934ee6a1 HTTP handshake in peer protocol (RIPD-351):
* New I/O paths for client and server role
* New handshake_analyzer detects the peer protocol
* New basic_message class for parsing and storing HTTP messages
* Conditional compilation for selective feature enabling.
* Server supports both current handshake and HTTP handshake
2014-08-05 13:17:02 -07:00
Vinnie Falco
723d7d1263 HTTP support improvements:
* RFC2616 compliance
* Case insensitive equality, inequality operators for strings
* Improvements to http::parser
* Tidy up HTTP method enumeration
2014-08-05 13:17:01 -07:00
Scott Schurr
298572893e Improvements to aged_containers (RIPD-363)
- Added unit tests for element erase
- Added unit tests for range erase
- Added unit tests for touch
- Added unit tests for iterators and reverse_iterators
- Un-inlined operator== for unordered containers
- Fixed minor problems with ordered_container erase()
- Made ordered_container...
  - erase (reverse_iterator pos) not compile
  - erase (reverse_iterator first, reverse_iterator last) not compile
  - touch (reverse iterator pos) not compile
- Verified that ordered container...
  - insert() already rejects reverse_iterator
  - emplace_hint() already rejects reverse_iterator
- Made set/multiset iterators const

Regarding the set/multiset iterators, see section 1.5 of
http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2009/n2913.pdf
as pointed out by Vinnie.
2014-08-04 15:25:30 -07:00
Mark Travis
405f6f7368 Make NetworkOPs::isFull() thread-safe 2014-08-04 11:19:07 -07:00
Tom Ritchford
648ccc7c17 Replace const Type& with Type const& for common types.
* std::string
* RippleAccount
* Account
* Currency
* uint256
* STAmount
* Json::Value
2014-08-04 11:18:44 -07:00
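
The two spellings name the same type; the commit only standardizes on the trailing-const form. For example:

    #include <string>

    void before(const std::string& s);   // "const Type&"
    void after(std::string const& s);    // "Type const&", an identical signature
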
Mark Travis
f5afe0587f Fix filter_policy object leak in NodeStore backends 2014-08-04 11:16:44 -07:00
Tom Ritchford
91a227a475 Correct Pathfinder::getPathsOut to handle order books (RIPD-427) 2014-08-02 10:38:57 -07:00
Tom Ritchford
4b905fe9ff Clean OrderBookDB::getBooksBy methods. 2014-08-02 10:38:57 -07:00
Howard Hinnant
0f409b7bec ASCII clean 2014-08-02 10:38:56 -07:00
Vinnie Falco
295c8de858 Detect inconsistency in PeerFinder self-connects (RIPD-411) 2014-07-31 16:10:54 -07:00
Nik Bougalis
e5252f90af Remove LedgerBase and decongest Ledger locks 2014-07-31 16:05:08 -07:00
Miguel Portilla
c2276155bf Add pubkey_node and hostid to server stream messages (RIPD-407) 2014-07-31 16:05:08 -07:00
Miguel Portilla
dbe49bcd87 Remove TRUST_NETWORK directive (RIPD-331) 2014-07-31 16:04:59 -07:00
David Schwartz
7b936de32c Freeze enforcing: (RIPD-399)
* Set enforce date: September 15, 2014
* Enforce in stand alone mode
* Enforce at source
* Enforce intermediary nodes
* Enforce global freeze in get paths out
* Enforce global freeze in create offer
* Don't consider frozen links a path out
* Handle in getBookPage
* Enforce in new offer transactors
2014-07-30 23:28:48 -07:00
evhub
9eb34f542c More fixes to VSProject sorting algorithm 2014-07-30 08:56:54 -07:00
Tom Ritchford
194304e544 Refactor RippleCalc:
* Rationalize method and filenames, move to subdirectory.
* Use Issue in Node.
* Restrict access to PathState variables.
* Line length and readability cleanups.
* New PathCursor stores path calculation data during rippleCalc.
* Extract methods PathCursor::node(), PathCursor::previousNode()
  and RippleCalc::addPath
2014-07-30 08:29:29 -07:00
Howard Hinnant
c59fc332d5 Make CBase58Data/RippleAddress movable (RIPD-428):
This significantly increases the performance in returning
these types from factory functions.
2014-07-30 07:25:24 -07:00
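
A generic sketch of why move members help here (the class and member names are hypothetical, not the rippled types): with a move constructor, a factory function hands back the internal buffer instead of deep-copying it.

    #include <string>
    #include <utility>

    class Address
    {
    public:
        explicit Address(std::string encoded) : encoded_(std::move(encoded)) {}
        Address(Address&&) = default;             // movable: cheap to return by value
        Address& operator=(Address&&) = default;

    private:
        std::string encoded_;
    };

    Address makeAddress()
    {
        Address a("rExampleEncodedAddress");
        return a;                                 // moved (or elided), not deep-copied
    }
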
Nik Bougalis
b43832fe57 Use std::atomic 2014-07-29 21:50:58 -04:00
Howard Hinnant
c24a497a23 Further documentation improvements to Ledger Consensus. 2014-07-29 21:41:19 -04:00
Tom Ritchford
4096fcd1bf Reduce scope of RPC locks and general cleanup (RIPD-458) 2014-07-29 16:24:44 -04:00
Howard Hinnant
58547f6997 Tidy up hardened containers (RIPD-380):
* Rename hardened containers for clarity
* Fixes https://ripplelabs.atlassian.net/browse/RIPD-380
2014-07-28 09:06:35 -07:00
Howard Hinnant
e6f4eedb1e Tidy up hardened_hash:
* Added siphash as a HashAlgorithm
* Refactored class responsibilities
2014-07-28 09:06:22 -07:00
Vinnie Falco
c5b963141f Fix intrinsic calls in static_initializer 2014-07-28 09:04:41 -07:00
Nik Bougalis
5df40bd746 Fix auth handling during OfferCreate (RIPD-414) 2014-07-28 08:57:22 -07:00
Howard Hinnant
403f15dc48 Documentation for Ledger Consensus implementation (RIPD-405) 2014-07-28 08:56:23 -07:00
Vinnie Falco
fa11071443 Enable asynchronous handling of HTTP-RPC (RIPD-390)
* Activate async code path
* Tidy up HTTP server code
* Use shared_ptr in HTTP server
* Remove check for unspecified IP
* Remove hairtrigger
* Fix missing HTTP authorization check
* Fix multisocket flags in RPC-HTTP server
* Fix authorization failure when no credentials required
* Addresses RIPD-159, RIPD-161, RIPD-390
2014-07-24 20:22:55 -07:00
Vinnie Falco
87351c8a0c Add HTTPHeaders::build_map 2014-07-24 20:18:51 -07:00
Vinnie Falco
2f5fb1e68e Make bind_handler variadic ctor explicit 2014-07-24 20:18:51 -07:00
Tom Ritchford
96e1ec6d31 Fix build warnings and .gitignore.
* Comment out unused local function for both clang and g++.
* Get rid of numerous Boost warnings for clang.
* Remove some unused local variables.
* Put TAGS into the .gitignore.
2014-07-24 20:18:51 -07:00
Nik Bougalis
ac3cf05f1a Properly order warning suppression flags during compile 2014-07-24 20:18:51 -07:00
Tom Ritchford
6335e34395 Simplify locking and move a typedef.
* Make DatabaseCon's lock private and expose a scoped lock_guard.
* Get rid of DeprecatedRecursiveMutex and DeprecatedScopedLock entirely.
* Move CancelCallback to Job where it logically belongs.
2014-07-23 19:38:52 -07:00
Scott Schurr
02c2029ac1 Add more documentation of ledger acquisition (RIPD-373)
Capturing information from a seminar on the topic into the source tree.
2014-07-23 19:29:13 -07:00
David Schwartz
6914aa3e27 Check for payment increments that make no progress. (RIPD-374) 2014-07-23 19:27:55 -07:00
David Schwartz
f4fcb1cc9a Remove some dead SHAMapMissingNode code 2014-07-23 19:27:50 -07:00
David Schwartz
b6eec21ec0 Pathfinder cleanups, more efficient 'd' handling (RIPD-156) 2014-07-23 19:27:46 -07:00
David Schwartz
0ce3aeb189 Stop finding paths when enough are found (RIPD-156) 2014-07-23 19:27:42 -07:00
Nicholas Dudfield
713c8efcbe Fix intermittently failing send test 2014-07-23 02:27:19 -07:00
1506 changed files with 81326 additions and 66980 deletions

1
.gitignore vendored
View File

@@ -19,6 +19,7 @@
*.o
build
tags
TAGS
bin/rippled
Debug/*.*
Release/*.*

.travis.yml
View File

@@ -35,23 +35,35 @@ before_install:
# What versions are we ACTUALLY running?
- g++ -v
- clang -v
# Avoid `spurious errors` caused by ~/.npm permission issues
# Does it already exist? Who owns? What permissions?
- ls -lah ~/.npm || mkdir ~/.npm
# Make sure we own it
- sudo chown -R $USER ~/.npm
script:
# Set so any failing command will abort the build
- set -e
# If only we could do -j12 ;)
- scons
# $CC will be either `clang` or `gcc` (If only we could do -j12 ;)
- scons $CC.debug
# We can be sure we're using the build/$CC.debug variant (-f so never err)
- rm -f build/rippled
- export RIPPLED_PATH="$PWD/build/$CC.debug/rippled"
# See what we've actually built
- ldd ./build/rippled
- ldd $RIPPLED_PATH
# Run unittests (under gdb)
- | # create gdb script
echo "set env MALLOC_CHECK_=3" > script.gdb
echo "run" >> script.gdb
echo "backtrace full" >> script.gdb
# gdb --help
- cat script.gdb | gdb --ex 'set print thread-events off' --return-child-result --args ./build/rippled --unittest
# Run integration tests
- cat script.gdb | gdb --ex 'set print thread-events off' --return-child-result --args $RIPPLED_PATH --unittest
- npm install
# Use build/(gcc|clang).debug/rippled
- |
echo "exports.default_server_config = {\"rippled_path\" : \"$RIPPLED_PATH\"};" > test/config.js
# Run integration tests
- npm test
notifications:
email:

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
Name: rippled
Version: 0.26.1
Version: 0.26.4
Release: 1%{?dist}
Summary: Ripple peer-to-peer network daemon

SConstruct
View File

@@ -67,6 +67,7 @@ CHECK_LINE = 'built on: '
BUILD_TIME = 'Mon Apr 7 20:33:19 UTC 2014'
OPENSSL_ERROR = ('Your openSSL was built on %s; '
'rippled needs a version built on or after %s.')
UNITY_BUILD_DIRECTORY = 'src/ripple/unity/'
def check_openssl():
if Beast.system.platform in CHECK_PLATFORMS:
@@ -200,10 +201,6 @@ def config_base(env):
env.Append(CPPDEFINES=['OPENSSL_NO_SSL2'])
#git = Beast.Git(env) # TODO(TOM)
if False: #git.exists:
env.Append(CPPDEFINES={'GIT_COMMIT_ID' : '"%s"' % git.commit_id})
try:
BOOST_ROOT = os.path.normpath(os.environ['BOOST_ROOT'])
env.Append(CPPPATH=[
@@ -234,6 +231,15 @@ def config_base(env):
env.Prepend(CPPPATH='%s/include' % openssl)
env.Prepend(LIBPATH=['%s/lib' % openssl])
# handle command-line arguments
profile_jemalloc = ARGUMENTS.get('profile-jemalloc')
if profile_jemalloc:
env.Append(CPPDEFINES={'PROFILE_JEMALLOC' : profile_jemalloc})
env.Append(LIBS=['jemalloc'])
env.Append(LIBPATH=[os.path.join(profile_jemalloc, 'lib')])
env.Append(CPPPATH=[os.path.join(profile_jemalloc, 'include')])
env.Append(LINKFLAGS=['-Wl,-rpath,' + os.path.join(profile_jemalloc, 'lib')])
# Set toolchain and variant specific construction variables
def config_env(toolchain, variant, env):
if variant == 'debug':
@@ -247,15 +253,19 @@ def config_env(toolchain, variant, env):
env.ParseConfig('pkg-config --static --cflags --libs openssl')
env.ParseConfig('pkg-config --static --cflags --libs protobuf')
env.Prepend(CCFLAGS=['-Wall'])
env.Prepend(CFLAGS=['-Wall'])
env.Prepend(CXXFLAGS=['-Wall'])
env.Append(CCFLAGS=[
'-Wno-sign-compare',
'-Wno-char-subscripts',
'-Wno-format',
'-Wno-deprecated-register'
'-g' # generate debug symbols
])
if toolchain == 'clang':
env.Append(CCFLAGS=['-Wno-redeclared-class-member'])
env.Append(CXXFLAGS=[
'-frtti',
'-std=c++11',
@@ -281,15 +291,18 @@ def config_env(toolchain, variant, env):
])
boost_libs = [
'boost_coroutine',
'boost_context',
'boost_date_time',
'boost_filesystem',
'boost_program_options',
'boost_regex',
'boost_system',
'boost_thread'
]
# We prefer static libraries for boost
if env.get('BOOST_ROOT'):
# Need to add boost_thread. Not needed when dynamic linking is used.
boost_libs += ['boost_thread']
static_libs = ['%s/stage/lib/lib%s.a' % (env['BOOST_ROOT'], l) for
l in boost_libs]
if all(os.path.exists(f) for f in static_libs):
@@ -315,33 +328,53 @@ def config_env(toolchain, variant, env):
'-rdynamic'
])
if variant == 'debug':
env.Append(CCFLAGS=[
'-g'
])
elif variant == 'release':
if variant == 'release':
env.Append(CCFLAGS=[
'-O3',
'-fno-strict-aliasing'
])
if toolchain != 'msvc':
git = Beast.Git(env)
if git.exists:
id = '%s+%s.%s' % (git.tags, git.user, git.branch)
env.Append(CPPDEFINES={'GIT_COMMIT_ID' : '\'"%s"\'' % id })
if toolchain == 'clang':
if Beast.system.osx:
env.Replace(CC='clang', CXX='clang++', LINK='clang++')
elif 'CLANG_CC' in env and 'CLANG_CXX' in env and 'CLANG_LINK' in env:
env.Replace(CC=env['CLANG_CC'], CXX=env['CLANG_CXX'], LINK=env['CLANG_LINK'])
env.Replace(CC=env['CLANG_CC'],
CXX=env['CLANG_CXX'],
LINK=env['CLANG_LINK'])
# C and C++
# Add '-Wshorten-64-to-32'
env.Append(CCFLAGS=[])
# C++ only
env.Append(CXXFLAGS=['-Wno-mismatched-tags'])
env.Append(CXXFLAGS=[
'-Wno-mismatched-tags',
'-Wno-deprecated-register',
])
elif toolchain == 'gcc':
if 'GNU_CC' in env and 'GNU_CXX' in env and 'GNU_LINK' in env:
env.Replace(CC=env['GNU_CC'], CXX=env['GNU_CXX'], LINK=env['GNU_LINK'])
env.Replace(CC=env['GNU_CC'],
CXX=env['GNU_CXX'],
LINK=env['GNU_LINK'])
# Why is this only for gcc?!
env.Append(CCFLAGS=['-Wno-unused-local-typedefs'])
# If we are in debug mode, use GCC-specific functionality to add
# extra error checking into the code (e.g. std::vector will throw
# for out-of-bounds conditions)
if variant == 'debug':
env.Append(CPPDEFINES={
'_FORTIFY_SOURCE': 2
})
env.Append(CCFLAGS=[
'-O0'
])
elif toolchain == 'msvc':
env.Append (CPPPATH=[
os.path.join('src', 'protobuf', 'src'),
@@ -430,14 +463,6 @@ def config_env(toolchain, variant, env):
#-------------------------------------------------------------------------------
def addSource(path, env, variant_dirs, CPPPATH=[]):
if CPPPATH:
env = env.Clone()
env.Prepend(CPPPATH=CPPPATH)
return env.Object(Beast.variantFile(path, variant_dirs))
#-------------------------------------------------------------------------------
# Configure the base construction environment
root_dir = Dir('#').srcnode().get_abspath() # Path to this SConstruct file
build_dir = os.path.join('build')
@@ -476,7 +501,7 @@ else:
default_toolchain = 'clang'
else:
raise ValueError("Don't understand toolchains in " + str(toolchains))
default_variant = 'debug'
default_variant = 'release'
default_target = None
for source in [
@@ -488,6 +513,25 @@ for source in [
PROTOCOUTDIR=os.path.join(build_dir, 'proto'),
PROTOCPYTHONOUTDIR=None)
#-------------------------------------------------------------------------------
class ObjectBuilder(object):
def __init__(self, env, variant_dirs):
self.env = env
self.variant_dirs = variant_dirs
self.objects = []
def add_source_files(self, *filenames, **kwds):
for filename in filenames:
env = self.env
if kwds:
env = env.Clone()
env.Prepend(**kwds)
path = UNITY_BUILD_DIRECTORY + filename
o = env.Object(Beast.variantFile(path, self.variant_dirs))
self.objects.append(o)
# Declare the targets
aliases = collections.defaultdict(list)
msvc_configs = []
@@ -506,82 +550,110 @@ for toolchain in all_toolchains:
}
for dest, source in variant_dirs.iteritems():
env.VariantDir(dest, source, duplicate=0)
objects = []
objects.append(addSource('src/ripple/unity/app.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/app1.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/app2.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/app3.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/app4.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/app5.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/app6.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/app7.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/app8.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/app9.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/basics.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/beast.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/beastc.c', env, variant_dirs))
objects.append(addSource('src/ripple/unity/common.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/core.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/data.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/http.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/json.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/net.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/overlay.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/peerfinder.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/protobuf.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/ripple.proto.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/radmap.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/resource.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/rpcx.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/sitefiles.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/sslutil.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/testoverlay.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/types.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/validators.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/websocket.cpp', env, variant_dirs))
objects.append(addSource('src/ripple/unity/nodestore.cpp', env, variant_dirs, [
'src/leveldb/include',
#'src/hyperleveldb/include', # hyper
'src/rocksdb/include',
]))
object_builder = ObjectBuilder(env, variant_dirs)
object_builder.add_source_files(
'app.cpp',
'app1.cpp',
'app2.cpp',
'app3.cpp',
'app4.cpp',
'app5.cpp',
'app6.cpp',
'app7.cpp',
'app8.cpp',
'app9.cpp',
'basics.cpp',
'beast.cpp',
'common.cpp',
'core.cpp',
'data.cpp',
'http.cpp',
'json.cpp',
'net.cpp',
'overlay.cpp',
'peerfinder.cpp',
'protobuf.cpp',
'ripple.proto.cpp',
'resource.cpp',
'rpcx.cpp',
'sitefiles.cpp',
'sslutil.cpp',
'types.cpp',
'validators.cpp',
'websocket.cpp',
)
objects.append(addSource('src/ripple/unity/leveldb.cpp', env, variant_dirs, [
'src/leveldb/',
'src/leveldb/include',
'src/snappy/snappy',
'src/snappy/config',
]))
object_builder.add_source_files(
'beastc.c',
CCFLAGS=['-Wno-array-bounds'])
objects.append(addSource('src/ripple/unity/hyperleveldb.cpp', env, variant_dirs, [
'src/hyperleveldb',
'src/snappy/snappy',
'src/snappy/config',
]))
object_builder.add_source_files(
'nodestore.cpp',
CPPPATH=[
'src/leveldb/include',
#'src/hyperleveldb/include', # hyper
'src/rocksdb2/include',
]
)
objects.append(addSource('src/ripple/unity/rocksdb.cpp', env, variant_dirs, [
'src/rocksdb',
'src/rocksdb/include',
'src/snappy/snappy',
'src/snappy/config',
]))
if 'gcc' in toolchain:
no_uninitialized_warning = {'CCFLAGS': ['-Wno-maybe-uninitialized']}
else:
no_uninitialized_warning = {}
objects.append(addSource('src/ripple/unity/snappy.cpp', env, variant_dirs, [
'src/snappy/snappy',
'src/snappy/config',
]))
object_builder.add_source_files(
'leveldb.cpp',
CPPPATH=[
'src/leveldb/',
'src/leveldb/include',
'src/snappy/snappy',
'src/snappy/config',
],
**no_uninitialized_warning
)
object_builder.add_source_files(
'hyperleveldb.cpp',
CPPPATH=[
'src/hyperleveldb',
'src/snappy/snappy',
'src/snappy/config',
],
**no_uninitialized_warning
)
object_builder.add_source_files(
'rocksdb.cpp',
CPPPATH=[
'src/rocksdb2',
'src/rocksdb2/include',
'src/snappy/snappy',
'src/snappy/config',
],
**no_uninitialized_warning
)
object_builder.add_source_files(
'snappy.cpp',
CCFLAGS=['-Wno-unused-function'],
CPPPATH=[
'src/snappy/snappy',
'src/snappy/config',
]
)
if toolchain == "clang" and Beast.system.osx:
objects.append(addSource('src/ripple/unity/beastobjc.mm', env, variant_dirs))
object_builder.add_source_files('beastobjc.mm')
target = env.Program(
target = os.path.join(variant_dir, 'rippled'),
source = objects
target=os.path.join(variant_dir, 'rippled'),
source=object_builder.objects
)
if toolchain == default_toolchain and variant == default_variant:
default_target = target
install_target = env.Install (build_dir, source = default_target)
install_target = env.Install (build_dir, source=default_target)
env.Alias ('install', install_target)
env.Default (install_target)
aliases['all'].extend(install_target)

1
bin/LT Symbolic link
View File

@@ -0,0 +1 @@
LedgerTool.py

24
bin/LedgerTool.py Executable file
View File

@@ -0,0 +1,24 @@
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import traceback
from ripple.ledger import Server
from ripple.ledger.commands import Cache, Info, Print
from ripple.ledger.Args import ARGS
from ripple.util import Log
from ripple.util.CommandList import CommandList
_COMMANDS = CommandList(Cache, Info, Print)
if __name__ == '__main__':
try:
server = Server.Server()
args = list(ARGS.command)
_COMMANDS.run_safe(args.pop(0), server, *args)
except Exception as e:
if ARGS.verbose:
print(traceback.format_exc(), sys.stderr)
Log.error(e)

8
bin/README.md Normal file
View File

@@ -0,0 +1,8 @@
Unit Tests
==========
To run the Python unit tests, execute:
python -m unittest discover
from this directory.

251
bin/decorator.py Normal file
View File

@@ -0,0 +1,251 @@
########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
__version__ = '3.4.0'
__all__ = ["decorator", "FunctionMaker", "contextmanager"]
import sys, re, inspect
if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
class getfullargspec(object):
"A quick and dirty replacement for getfullargspec for Python 2.X"
def __init__(self, f):
self.args, self.varargs, self.varkw, self.defaults = \
inspect.getargspec(f)
self.kwonlyargs = []
self.kwonlydefaults = None
def __iter__(self):
yield self.args
yield self.varargs
yield self.varkw
yield self.defaults
def get_init(cls):
return cls.__init__.im_func
DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
if sys.version < '3': # easy way
self.shortsignature = self.signature = \
inspect.formatargspec(
formatvalue=lambda val: "", *argspec)[1:-1]
else: # Python 3 way
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
# check existence required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.func_defaults = getattr(self, 'defaults', ())
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
func.__annotations__ = getattr(self, 'annotations', None)
callermodule = sys._getframe(3).f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.match(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline just for safety
src += '\n' # this is needed in old versions of Python
try:
code = compile(src, '<string>', 'single')
# print >> sys.stderr, 'Compiling %s' % src
exec code in evaldict
except:
print >> sys.stderr, 'Error in generated code:'
print >> sys.stderr, src
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an attribute
__source__ is added to the result. The attributes attrs are added,
if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
signature = rest[:-1] #strip a right parens
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
return self.make('def %(name)s(%(signature)s):\n' + ibody,
evaldict, addsource, **attrs)
def decorator(caller, func=None):
"""
decorator(caller) converts a caller function into a decorator;
decorator(caller, func) decorates a function using a caller.
"""
if func is not None: # returns a decorated function
evaldict = func.func_globals.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
return FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, undecorated=func, __wrapped__=func)
else: # returns a decorator
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
fun = getfullargspec(callerfunc).args[1] # second arg
elif inspect.isfunction(caller):
name = '_lambda_' if caller.__name__ == '<lambda>' \
else caller.__name__
callerfunc = caller
doc = caller.__doc__
fun = getfullargspec(callerfunc).args[0] # first arg
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.im_func
doc = caller.__call__.__doc__
fun = getfullargspec(callerfunc).args[1] # second arg
evaldict = callerfunc.func_globals.copy()
evaldict['_call_'] = caller
evaldict['decorator'] = decorator
return FunctionMaker.create(
'%s(%s)' % (name, fun),
'return decorator(_call_, %s)' % fun,
evaldict, undecorated=caller, __wrapped__=caller,
doc=doc, module=caller.__module__)
######################### contextmanager ########################
def __call__(self, func):
'Context manager decorator'
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
ContextManager = type(
'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager
def __init__(self, f, *a, **k):
return GeneratorContextManager.__init__(self, f(*a, **k))
ContextManager = type(
'ContextManager', (GeneratorContextManager,),
dict(__call__=__call__, __init__=__init__))
contextmanager = decorator(ContextManager)

bin/jsonpath_rw/__init__.py
View File

@@ -0,0 +1,4 @@
from .jsonpath import *
from .parser import parse
__version__ = '1.3.0'

510
bin/jsonpath_rw/jsonpath.py Normal file
View File

@@ -0,0 +1,510 @@
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
import logging
import six
from six.moves import xrange
from itertools import *
logger = logging.getLogger(__name__)
# Turn on/off the automatic creation of id attributes
# ... could be a kwarg pervasively but uses are rare and simple today
auto_id_field = None
class JSONPath(object):
"""
The base class for JSONPath abstract syntax; those
methods stubbed here are the interface to supported
JSONPath semantics.
"""
def find(self, data):
"""
All `JSONPath` types support `find()`, which returns an iterable of `DatumInContext`s.
They keep track of the path followed to the current location, so if the calling code
has some opinion about that, it can be passed in here as a starting point.
"""
raise NotImplementedError()
def update(self, data, val):
"Returns `data` with the specified path replaced by `val`"
raise NotImplementedError()
def child(self, child):
"""
Equivalent to Child(self, next) but with some canonicalization
"""
if isinstance(self, This) or isinstance(self, Root):
return child
elif isinstance(child, This):
return self
elif isinstance(child, Root):
return child
else:
return Child(self, child)
def make_datum(self, value):
if isinstance(value, DatumInContext):
return value
else:
return DatumInContext(value, path=Root(), context=None)
class DatumInContext(object):
"""
Represents a datum along a path from a context.
Essentially a zipper but with a structure represented by JsonPath,
and where the context is more of a parent pointer than a proper
representation of the context.
For quick-and-dirty work, this proxies any non-special attributes
to the underlying datum, but the actual datum can (and usually should)
be retrieved via the `value` attribute.
To place `datum` within another, use `datum.in_context(context=..., path=...)`
which extends the path. If the datum already has a context, it places the entire
context within that passed in, so an object can be built from the inside
out.
"""
@classmethod
def wrap(cls, data):
if isinstance(data, cls):
return data
else:
return cls(data)
def __init__(self, value, path=None, context=None):
self.value = value
self.path = path or This()
self.context = None if context is None else DatumInContext.wrap(context)
def in_context(self, context, path):
context = DatumInContext.wrap(context)
if self.context:
return DatumInContext(value=self.value, path=self.path, context=context.in_context(path=path, context=context))
else:
return DatumInContext(value=self.value, path=path, context=context)
@property
def full_path(self):
return self.path if self.context is None else self.context.full_path.child(self.path)
@property
def id_pseudopath(self):
"""
Looks like a path, but with ids stuck in when available
"""
try:
pseudopath = Fields(str(self.value[auto_id_field]))
except (TypeError, AttributeError, KeyError): # This may not be all the interesting exceptions
pseudopath = self.path
if self.context:
return self.context.id_pseudopath.child(pseudopath)
else:
return pseudopath
def __repr__(self):
return '%s(value=%r, path=%r, context=%r)' % (self.__class__.__name__, self.value, self.path, self.context)
def __eq__(self, other):
return isinstance(other, DatumInContext) and other.value == self.value and other.path == self.path and self.context == other.context
class AutoIdForDatum(DatumInContext):
"""
This behaves like a DatumInContext, but the value is
always the path leading up to it, not including the "id",
and with any "id" fields along the way replacing the prior
segment of the path
For example, it will make "foo.bar.id" return a datum
that behaves like DatumInContext(value="foo.bar", path="foo.bar.id").
This is disabled by default; it can be turned on by
settings the `auto_id_field` global to a value other
than `None`.
"""
def __init__(self, datum, id_field=None):
"""
Invariant is that datum.path is the path from context to datum. The auto id
will either be the id in the datum (if present) or the id of the context
followed by the path to the datum.
The path to this datum is always the path to the context, the path to the
datum, and then the auto id field.
"""
self.datum = datum
self.id_field = id_field or auto_id_field
@property
def value(self):
return str(self.datum.id_pseudopath)
@property
def path(self):
return self.id_field
@property
def context(self):
return self.datum
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.datum)
def in_context(self, context, path):
return AutoIdForDatum(self.datum.in_context(context=context, path=path))
def __eq__(self, other):
return isinstance(other, AutoIdForDatum) and other.datum == self.datum and self.id_field == other.id_field
class Root(JSONPath):
"""
The JSONPath referring to the "root" object. Concrete syntax is '$'.
The root is the topmost datum without any context attached.
"""
def find(self, data):
if not isinstance(data, DatumInContext):
return [DatumInContext(data, path=Root(), context=None)]
else:
if data.context is None:
return [DatumInContext(data.value, context=None, path=Root())]
else:
return Root().find(data.context)
def update(self, data, val):
return val
def __str__(self):
return '$'
def __repr__(self):
return 'Root()'
def __eq__(self, other):
return isinstance(other, Root)
class This(JSONPath):
"""
The JSONPath referring to the current datum. Concrete syntax is '@'.
"""
def find(self, datum):
return [DatumInContext.wrap(datum)]
def update(self, data, val):
return val
def __str__(self):
return '`this`'
def __repr__(self):
return 'This()'
def __eq__(self, other):
return isinstance(other, This)
class Child(JSONPath):
"""
JSONPath that first matches the left, then the right.
Concrete syntax is <left> '.' <right>
"""
def __init__(self, left, right):
self.left = left
self.right = right
def find(self, datum):
"""
Extra special case: auto ids do not have children,
so cut it off right now rather than auto id the auto id
"""
return [submatch
for subdata in self.left.find(datum)
if not isinstance(subdata, AutoIdForDatum)
for submatch in self.right.find(subdata)]
def __eq__(self, other):
return isinstance(other, Child) and self.left == other.left and self.right == other.right
def __str__(self):
return '%s.%s' % (self.left, self.right)
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.left, self.right)
class Parent(JSONPath):
"""
JSONPath that matches the parent node of the current match.
Will crash if no such parent exists.
Available via named operator `parent`.
"""
def find(self, datum):
datum = DatumInContext.wrap(datum)
return [datum.context]
def __eq__(self, other):
return isinstance(other, Parent)
def __str__(self):
return '`parent`'
def __repr__(self):
return 'Parent()'
class Where(JSONPath):
"""
JSONPath that first matches the left, and then
filters for only those nodes that have
a match on the right.
WARNING: Subject to change. May want to have "contains"
or some other better word for it.
"""
def __init__(self, left, right):
self.left = left
self.right = right
def find(self, data):
return [subdata for subdata in self.left.find(data) if self.right.find(data)]
def __str__(self):
return '%s where %s' % (self.left, self.right)
def __eq__(self, other):
return isinstance(other, Where) and other.left == self.left and other.right == self.right
class Descendants(JSONPath):
"""
JSONPath that matches first the left expression then any descendant
of it which matches the right expression.
"""
def __init__(self, left, right):
self.left = left
self.right = right
def find(self, datum):
# <left> .. <right> ==> <left> . (<right> | *..<right> | [*]..<right>)
#
# With with a wonky caveat that since Slice() has funky coercions
# we cannot just delegate to that equivalence or we'll hit an
# infinite loop. So right here we implement the coercion-free version.
# Get all left matches into a list
left_matches = self.left.find(datum)
if not isinstance(left_matches, list):
left_matches = [left_matches]
def match_recursively(datum):
right_matches = self.right.find(datum)
# Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern
if isinstance(datum.value, list):
recursive_matches = [submatch
for i in range(0, len(datum.value))
for submatch in match_recursively(DatumInContext(datum.value[i], context=datum, path=Index(i)))]
elif isinstance(datum.value, dict):
recursive_matches = [submatch
for field in datum.value.keys()
for submatch in match_recursively(DatumInContext(datum.value[field], context=datum, path=Fields(field)))]
else:
recursive_matches = []
return right_matches + list(recursive_matches)
# TODO: repeatable iterator instead of list?
return [submatch
for left_match in left_matches
for submatch in match_recursively(left_match)]
def is_singular():
return False
def __str__(self):
return '%s..%s' % (self.left, self.right)
def __eq__(self, other):
return isinstance(other, Descendants) and self.left == other.left and self.right == other.right
class Union(JSONPath):
"""
JSONPath that returns the union of the results of each match.
This is pretty shoddily implemented for now. The nicest semantics
in case of mismatched bits (list vs atomic) is to put
them all in a list, but I haven't done that yet.
WARNING: Any appearance of this being the _concatenation_ is
coincidence. It may even be a bug! (or laziness)
"""
def __init__(self, left, right):
self.left = left
self.right = right
def is_singular(self):
return False
def find(self, data):
return self.left.find(data) + self.right.find(data)
class Intersect(JSONPath):
"""
JSONPath for bits that match *both* patterns.
This can be accomplished a couple of ways. The most
efficient is to actually build the intersected
AST as in building a state machine for matching the
intersection of regular languages. The next
idea is to build a filtered data and match against
that.
"""
def __init__(self, left, right):
self.left = left
self.right = right
def is_singular(self):
return False
def find(self, data):
raise NotImplementedError()
class Fields(JSONPath):
"""
JSONPath referring to some field of the current object.
Concrete syntax ix comma-separated field names.
WARNING: If '*' is any of the field names, then they will
all be returned.
"""
def __init__(self, *fields):
self.fields = fields
def get_field_datum(self, datum, field):
if field == auto_id_field:
return AutoIdForDatum(datum)
else:
try:
field_value = datum.value[field] # Do NOT use `val.get(field)` since that confuses None as a value and None due to `get`
return DatumInContext(value=field_value, path=Fields(field), context=datum)
except (TypeError, KeyError, AttributeError):
return None
def reified_fields(self, datum):
if '*' not in self.fields:
return self.fields
else:
try:
fields = tuple(datum.value.keys())
return fields if auto_id_field is None else fields + (auto_id_field,)
except AttributeError:
return ()
def find(self, datum):
datum = DatumInContext.wrap(datum)
return [field_datum
for field_datum in [self.get_field_datum(datum, field) for field in self.reified_fields(datum)]
if field_datum is not None]
def __str__(self):
return ','.join(self.fields)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, ','.join(map(repr, self.fields)))
def __eq__(self, other):
return isinstance(other, Fields) and tuple(self.fields) == tuple(other.fields)
class Index(JSONPath):
"""
JSONPath that matches indices of the current datum, or none if not large enough.
Concrete syntax is brackets.
WARNING: If the datum is not long enough, it will not crash but will not match anything.
NOTE: For the concrete syntax of `[*]`, the abstract syntax is a Slice() with no parameters (equiv to `[:]`
"""
def __init__(self, index):
self.index = index
def find(self, datum):
datum = DatumInContext.wrap(datum)
if len(datum.value) > self.index:
return [DatumInContext(datum.value[self.index], path=self, context=datum)]
else:
return []
def __eq__(self, other):
return isinstance(other, Index) and self.index == other.index
def __str__(self):
return '[%i]' % self.index
class Slice(JSONPath):
"""
JSONPath matching a slice of an array.
Because of a mismatch between JSON and XML when schema-unaware,
this always returns an iterable; if the incoming data
was not a list, then it returns a one element list _containing_ that
data.
Consider these two docs, and their schema-unaware translation to JSON:
<a><b>hello</b></a> ==> {"a": {"b": "hello"}}
<a><b>hello</b><b>goodbye</b></a> ==> {"a": {"b": ["hello", "goodbye"]}}
If there were a schema, it would be known that "b" should always be an
array (unless the schema were wonky, but that is too much to fix here)
so when querying with JSON if the one writing the JSON knows that it
should be an array, they can write a slice operator and it will coerce
a non-array value to an array.
This may be a bit unfortunate because it would be nice to always have
an iterator, but dictionaries and other objects may also be iterable,
so this is the compromise.
"""
def __init__(self, start=None, end=None, step=None):
self.start = start
self.end = end
self.step = step
def find(self, datum):
datum = DatumInContext.wrap(datum)
# Here's the hack. If it is a dictionary or some kind of constant,
# put it in a single-element list
if (isinstance(datum.value, dict) or isinstance(datum.value, six.integer_types) or isinstance(datum.value, six.string_types)):
return self.find(DatumInContext([datum.value], path=datum.path, context=datum.context))
# Some iterators do not support slicing but we can still
# at least work for '*'
if self.start == None and self.end == None and self.step == None:
return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in xrange(0, len(datum.value))]
else:
return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in range(0, len(datum.value))[self.start:self.end:self.step]]
def __str__(self):
if self.start == None and self.end == None and self.step == None:
return '[*]'
else:
return '[%s%s%s]' % (self.start or '',
':%d'%self.end if self.end else '',
':%d'%self.step if self.step else '')
def __repr__(self):
return '%s(start=%r,end=%r,step=%r)' % (self.__class__.__name__, self.start, self.end, self.step)
def __eq__(self, other):
return isinstance(other, Slice) and other.start == self.start and self.end == other.end and other.step == self.step

171
bin/jsonpath_rw/lexer.py Normal file
View File

@@ -0,0 +1,171 @@
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
import sys
import logging
import ply.lex
logger = logging.getLogger(__name__)
class JsonPathLexerError(Exception):
pass
class JsonPathLexer(object):
'''
A Lexical analyzer for JsonPath.
'''
def __init__(self, debug=False):
self.debug = debug
if self.__doc__ == None:
raise JsonPathLexerError('Docstrings have been removed! By design of PLY, jsonpath-rw requires docstrings. You must not use PYTHONOPTIMIZE=2 or python -OO.')
def tokenize(self, string):
'''
Maps a string to an iterator over tokens. In other words: [char] -> [token]
'''
new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
new_lexer.latest_newline = 0
new_lexer.string_value = None
new_lexer.input(string)
while True:
t = new_lexer.token()
if t is None: break
t.col = t.lexpos - new_lexer.latest_newline
yield t
if new_lexer.string_value is not None:
raise JsonPathLexerError('Unexpected EOF in string literal or identifier')
# ============== PLY Lexer specification ==================
#
# This probably should be private but:
# - the parser requires access to `tokens` (perhaps they should be defined in a third, shared dependency)
# - things like `literals` might be a legitimate part of the public interface.
#
# Anyhow, it is pythonic to give some rope to hang oneself with :-)
literals = ['*', '.', '[', ']', '(', ')', '$', ',', ':', '|', '&']
reserved_words = { 'where': 'WHERE' }
tokens = ['DOUBLEDOT', 'NUMBER', 'ID', 'NAMED_OPERATOR'] + list(reserved_words.values())
states = [ ('singlequote', 'exclusive'),
('doublequote', 'exclusive'),
('backquote', 'exclusive') ]
# Normal lexing, rather easy
t_DOUBLEDOT = r'\.\.'
t_ignore = ' \t'
def t_ID(self, t):
r'[a-zA-Z_@][a-zA-Z0-9_@\-]*'
t.type = self.reserved_words.get(t.value, 'ID')
return t
def t_NUMBER(self, t):
r'-?\d+'
t.value = int(t.value)
return t
# Single-quoted strings
t_singlequote_ignore = ''
def t_singlequote(self, t):
r"'"
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('singlequote')
def t_singlequote_content(self, t):
r"[^'\\]+"
t.lexer.string_value += t.value
def t_singlequote_escape(self, t):
r'\\.'
t.lexer.string_value += t.value[1]
def t_singlequote_end(self, t):
r"'"
t.value = t.lexer.string_value
t.type = 'ID'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def t_singlequote_error(self, t):
raise JsonPathLexerError('Error on line %s, col %s while lexing singlequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
# Double-quoted strings
t_doublequote_ignore = ''
def t_doublequote(self, t):
r'"'
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('doublequote')
def t_doublequote_content(self, t):
r'[^"\\]+'
t.lexer.string_value += t.value
def t_doublequote_escape(self, t):
r'\\.'
t.lexer.string_value += t.value[1]
def t_doublequote_end(self, t):
r'"'
t.value = t.lexer.string_value
t.type = 'ID'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def t_doublequote_error(self, t):
raise JsonPathLexerError('Error on line %s, col %s while lexing doublequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
# Back-quoted "magic" operators
t_backquote_ignore = ''
def t_backquote(self, t):
r'`'
t.lexer.string_start = t.lexer.lexpos
t.lexer.string_value = ''
t.lexer.push_state('backquote')
def t_backquote_escape(self, t):
r'\\.'
t.lexer.string_value += t.value[1]
def t_backquote_content(self, t):
r"[^`\\]+"
t.lexer.string_value += t.value
def t_backquote_end(self, t):
r'`'
t.value = t.lexer.string_value
t.type = 'NAMED_OPERATOR'
t.lexer.string_value = None
t.lexer.pop_state()
return t
def t_backquote_error(self, t):
raise JsonPathLexerError('Error on line %s, col %s while lexing backquoted operator: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
# Counting lines, handling errors
def t_newline(self, t):
r'\n'
t.lexer.lineno += 1
t.lexer.latest_newline = t.lexpos
def t_error(self, t):
raise JsonPathLexerError('Error on line %s, col %s: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
if __name__ == '__main__':
logging.basicConfig()
lexer = JsonPathLexer(debug=True)
for token in lexer.tokenize(sys.stdin.read()):
print('%-20s%s' % (token.value, token.type))
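For a concrete sense of the token stream, a small sketch (the path string is arbitrary, not taken from this diff; it assumes the bundled package is importable):
from jsonpath_rw.lexer import JsonPathLexer
for tok in JsonPathLexer().tokenize("foo.baz['bar qux'][*]"):
    print('%-15s %r' % (tok.type, tok.value))
# Roughly: ID 'foo', '.' '.', ID 'baz', '[' '[', ID 'bar qux', ']' ']',
# '[' '[', '*' '*', ']' ']'; quoted names come back as plain ID tokens.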

187
bin/jsonpath_rw/parser.py Normal file

@@ -0,0 +1,187 @@
from __future__ import print_function, absolute_import, division, generators, nested_scopes
import sys
import os.path
import logging
import ply.yacc
from jsonpath_rw.jsonpath import *
from jsonpath_rw.lexer import JsonPathLexer
logger = logging.getLogger(__name__)
def parse(string):
return JsonPathParser().parse(string)
class JsonPathParser(object):
'''
An LALR-parser for JsonPath
'''
tokens = JsonPathLexer.tokens
def __init__(self, debug=False, lexer_class=None):
if self.__doc__ == None:
raise Exception('Docstrings have been removed! By design of PLY, jsonpath-rw requires docstrings. You must not use PYTHONOPTIMIZE=2 or python -OO.')
self.debug = debug
self.lexer_class = lexer_class or JsonPathLexer # Crufty but works around statefulness in PLY
def parse(self, string, lexer = None):
lexer = lexer or self.lexer_class()
return self.parse_token_stream(lexer.tokenize(string))
def parse_token_stream(self, token_iterator, start_symbol='jsonpath'):
# Since PLY has some crufty aspects and dumps files, we try to keep them local
# However, we need to derive the name of the output Python file :-/
output_directory = os.path.dirname(__file__)
try:
module_name = os.path.splitext(os.path.split(__file__)[1])[0]
except:
module_name = __name__
parsing_table_module = '_'.join([module_name, start_symbol, 'parsetab'])
# And we regenerate the parse table every time; it doesn't actually take that long!
new_parser = ply.yacc.yacc(module=self,
debug=self.debug,
tabmodule = parsing_table_module,
outputdir = output_directory,
write_tables=0,
start = start_symbol,
errorlog = logger)
return new_parser.parse(lexer = IteratorToTokenStream(token_iterator))
# ===================== PLY Parser specification =====================
precedence = [
('left', ','),
('left', 'DOUBLEDOT'),
('left', '.'),
('left', '|'),
('left', '&'),
('left', 'WHERE'),
]
def p_error(self, t):
raise Exception('Parse error at %s:%s near token %s (%s)' % (t.lineno, t.col, t.value, t.type))
def p_jsonpath_binop(self, p):
"""jsonpath : jsonpath '.' jsonpath
| jsonpath DOUBLEDOT jsonpath
| jsonpath WHERE jsonpath
| jsonpath '|' jsonpath
| jsonpath '&' jsonpath"""
op = p[2]
if op == '.':
p[0] = Child(p[1], p[3])
elif op == '..':
p[0] = Descendants(p[1], p[3])
elif op == 'where':
p[0] = Where(p[1], p[3])
elif op == '|':
p[0] = Union(p[1], p[3])
elif op == '&':
p[0] = Intersect(p[1], p[3])
def p_jsonpath_fields(self, p):
"jsonpath : fields_or_any"
p[0] = Fields(*p[1])
def p_jsonpath_named_operator(self, p):
"jsonpath : NAMED_OPERATOR"
if p[1] == 'this':
p[0] = This()
elif p[1] == 'parent':
p[0] = Parent()
else:
raise Exception('Unknown named operator `%s` at %s:%s' % (p[1], p.lineno(1), p.lexpos(1)))
def p_jsonpath_root(self, p):
"jsonpath : '$'"
p[0] = Root()
def p_jsonpath_idx(self, p):
"jsonpath : '[' idx ']'"
p[0] = p[2]
def p_jsonpath_slice(self, p):
"jsonpath : '[' slice ']'"
p[0] = p[2]
def p_jsonpath_fieldbrackets(self, p):
"jsonpath : '[' fields ']'"
p[0] = Fields(*p[2])
def p_jsonpath_child_fieldbrackets(self, p):
"jsonpath : jsonpath '[' fields ']'"
p[0] = Child(p[1], Fields(*p[3]))
def p_jsonpath_child_idxbrackets(self, p):
"jsonpath : jsonpath '[' idx ']'"
p[0] = Child(p[1], p[3])
def p_jsonpath_child_slicebrackets(self, p):
"jsonpath : jsonpath '[' slice ']'"
p[0] = Child(p[1], p[3])
def p_jsonpath_parens(self, p):
"jsonpath : '(' jsonpath ')'"
p[0] = p[2]
# Because fields in brackets cannot be '*' - that is reserved for array indices
def p_fields_or_any(self, p):
"""fields_or_any : fields
| '*' """
if p[1] == '*':
p[0] = ['*']
else:
p[0] = p[1]
def p_fields_id(self, p):
"fields : ID"
p[0] = [p[1]]
def p_fields_comma(self, p):
"fields : fields ',' fields"
p[0] = p[1] + p[3]
def p_idx(self, p):
"idx : NUMBER"
p[0] = Index(p[1])
def p_slice_any(self, p):
"slice : '*'"
p[0] = Slice()
def p_slice(self, p): # Currently does not support `step`
"slice : maybe_int ':' maybe_int"
p[0] = Slice(start=p[1], end=p[3])
def p_maybe_int(self, p):
"""maybe_int : NUMBER
| empty"""
p[0] = p[1]
def p_empty(self, p):
'empty :'
p[0] = None
class IteratorToTokenStream(object):
def __init__(self, iterator):
self.iterator = iterator
def token(self):
try:
return next(self.iterator)
except StopIteration:
return None
if __name__ == '__main__':
logging.basicConfig()
parser = JsonPathParser(debug=True)
print(parser.parse(sys.stdin.read()))
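A short sketch of how the binary operators above map onto AST nodes (illustrative; the class names are those defined in the bundled jsonpath_rw.jsonpath module):
from jsonpath_rw.jsonpath import Child, Descendants, Union, Where
from jsonpath_rw.parser import parse
print(isinstance(parse('foo.bar'), Child))         # True
print(isinstance(parse('foo..bar'), Descendants))  # True
print(isinstance(parse('foo | bar'), Union))       # True
print(isinstance(parse('foo where bar'), Where))   # True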

4
bin/ply/__init__.py Normal file

@@ -0,0 +1,4 @@
# PLY package
# Author: David Beazley (dave@dabeaz.com)
__all__ = ['lex','yacc']

898
bin/ply/cpp.py Normal file

@@ -0,0 +1,898 @@
# -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU]|[lL]|[uU][lL]|[lL][uU])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT(t):
r'(/\*(.|\n)*?\*/)|(//.*?\n)'
t.lexer.lineno += t.value.count("\n")
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
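# Illustrative example (not in the original source): trigraph('??=define ARR(x) x??(0??)')
# returns '#define ARR(x) x[0]', since ??= maps to '#', ??( to '[' and ??) to ']'.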
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
if lexer is None:
lexer = lex.lexer
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
# function forms the lowest level of the preprocessor---grouping text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in xrange(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma-separated arguments from a list of tokens. The arguments
# must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parentheses and commas---these do not
# define new arguments.
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
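# Illustrative example (not in the original source): for a token list spelling
# "(a, f(b, c), d) rest", collect_args returns three arguments, [a], [f ( b , c )]
# and [d], together with the number of tokens consumed through the closing ')';
# the nested parentheses and the comma inside f(b, c) do not start a new argument.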
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
i += 1
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
# Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid of the comma that precedes it
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except StandardError:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
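# Illustrative example (not in the original source): with FOO defined and BAR
# undefined, the controlling expression of "#if defined(FOO) && !BAR" is folded
# here by turning defined(FOO) into 1, the unknown identifier BAR into 0,
# rewriting '&&' as 'and' and '!' as 'not', and eval()-ing the resulting Python
# expression, which comes out true.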
# ----------------------------------------------------------------------
# parsegen()
#
# Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
for tok in x:
if tok in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
if not args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,(str,unicode)):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)

133
bin/ply/ctokens.py Normal file

@@ -0,0 +1,133 @@
# ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++. This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------
# Reserved words
tokens = [
# Literals (identifier, integer constant, float constant, string constant, char const)
'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
# Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
# Increment/decrement (++,--)
'PLUSPLUS', 'MINUSMINUS',
# Structure dereference (->)
'ARROW',
# Ternary operator (?)
'TERNARY',
# Delimiters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'COMMA', 'PERIOD', 'SEMI', 'COLON',
# Ellipsis (...)
'ELLIPSIS',
]
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'^='
# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'
# ->
t_ARROW = r'->'
# ?
t_TERNARY = r'\?'
# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'
# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
# Comment (C-Style)
def t_COMMENT(t):
r'/\*(.|\n)*?\*/'
t.lexer.lineno += t.value.count('\n')
return t
# Comment (C++-Style)
def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t

1058
bin/ply/lex.py Normal file

File diff suppressed because it is too large

3276
bin/ply/yacc.py Normal file

File diff suppressed because it is too large

0
bin/ripple/__init__.py Normal file

187
bin/ripple/ledger/Args.py Normal file

@@ -0,0 +1,187 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import importlib
import os
from ripple.ledger import LedgerNumber
from ripple.util import File
from ripple.util import Log
from ripple.util import PrettyPrint
from ripple.util import Range
from ripple.util.Function import Function
NAME = 'LedgerTool'
VERSION = '0.1'
NONE = '(none)'
_parser = argparse.ArgumentParser(
prog=NAME,
description='Retrieve and process Ripple ledgers.',
epilog=LedgerNumber.HELP,
)
# Positional arguments.
_parser.add_argument(
'command',
nargs='*',
help='Command to execute.'
)
# Flag arguments.
_parser.add_argument(
'--binary',
action='store_true',
help='If true, searches are binary - by default linear search is used.',
)
_parser.add_argument(
'--cache',
default='~/.local/share/ripple/ledger',
help='The cache directory.',
)
_parser.add_argument(
'--complete',
action='store_true',
help='If set, only match complete ledgers.',
)
_parser.add_argument(
'--condition', '-c',
help='The name of a condition function used to match ledgers.',
)
_parser.add_argument(
'--config',
help='The rippled configuration file name.',
)
_parser.add_argument(
'--database', '-d',
nargs='*',
default=NONE,
help='Specify a database.',
)
_parser.add_argument(
'--display',
help='Specify a function to display ledgers.',
)
_parser.add_argument(
'--full', '-f',
action='store_true',
help='If true, request full ledgers.',
)
_parser.add_argument(
'--indent', '-i',
type=int,
default=2,
help='How many spaces to indent when displaying JSON.',
)
_parser.add_argument(
'--offline', '-o',
action='store_true',
help='If true, work entirely from cache, do not try to contact the server.',
)
_parser.add_argument(
'--position', '-p',
choices=['all', 'first', 'last'],
default='last',
help='Select which ledgers to display.',
)
_parser.add_argument(
'--rippled', '-r',
help='The filename of a rippled binary for retrieving ledgers.',
)
_parser.add_argument(
'--server', '-s',
help='IP address of a rippled JSON server.',
)
_parser.add_argument(
'--utc', '-u',
action='store_true',
help='If true, display times in UTC rather than local time.',
)
_parser.add_argument(
'--validations',
default=3,
help='The number of validations needed before considering a ledger valid.',
)
_parser.add_argument(
'--version',
action='version',
version='%(prog)s ' + VERSION,
help='Print the current version of %(prog)s',
)
_parser.add_argument(
'--verbose', '-v',
action='store_true',
help='If true, give status messages on stderr.',
)
_parser.add_argument(
'--window', '-w',
type=int,
default=0,
help='How many ledgers to display around the matching ledger.',
)
_parser.add_argument(
'--yes', '-y',
action='store_true',
help='If true, don\'t ask for confirmation on large commands.',
)
# Read the arguments from the command line.
ARGS = _parser.parse_args()
ARGS.NONE = NONE
Log.VERBOSE = ARGS.verbose
# Now remove any items that look like ledger numbers from the command line.
_command = ARGS.command
_parts = (ARGS.command, ARGS.ledgers) = ([], [])
for c in _command:
_parts[Range.is_range(c, *LedgerNumber.LEDGERS)].append(c)
ARGS.command = ARGS.command or ['print' if ARGS.ledgers else 'info']
ARGS.cache = File.normalize(ARGS.cache)
if not ARGS.ledgers:
if ARGS.condition:
Log.warn('--condition needs a range of ledgers')
if ARGS.display:
Log.warn('--display needs a range of ledgers')
ARGS.condition = Function(
ARGS.condition or 'all_ledgers', 'ripple.ledger.conditions')
ARGS.display = Function(
ARGS.display or 'ledger_number', 'ripple.ledger.displays')
if ARGS.window < 0:
raise ValueError('Window cannot be negative: --window=%d' %
ARGS.window)
PrettyPrint.INDENT = (ARGS.indent * ' ')
_loaders = (ARGS.database != NONE) + bool(ARGS.rippled) + bool(ARGS.server)
if not _loaders:
ARGS.rippled = 'rippled'
elif _loaders > 1:
raise ValueError('At most one of --database, --rippled and --server '
'may be specified')
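The loop above splits the positional arguments by indexing a two-tuple with a boolean; a standalone sketch of the same idiom (the names here are illustrative, not part of Args.py):
commands, ledgers = parts = ([], [])
for word in ['info', '4000', '5000-5100']:
    looks_like_ledger = word[0].isdigit()   # stand-in for Range.is_range()
    parts[looks_like_ledger].append(word)   # False indexes slot 0, True indexes slot 1
print(commands)   # ['info']
print(ledgers)    # ['4000', '5000-5100']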


@@ -0,0 +1,78 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import subprocess
from ripple.ledger.Args import ARGS
from ripple.util import ConfigFile
from ripple.util import Database
from ripple.util import File
from ripple.util import Log
from ripple.util import Range
LEDGER_QUERY = """
SELECT
L.*, count(1) validations
FROM
(select LedgerHash, LedgerSeq from Ledgers ORDER BY LedgerSeq DESC) L
JOIN Validations V
ON (V.LedgerHash = L.LedgerHash)
GROUP BY L.LedgerHash
HAVING validations >= {validation_quorum}
ORDER BY 2;
"""
COMPLETE_QUERY = """
SELECT
L.LedgerSeq, count(*) validations
FROM
(select LedgerHash, LedgerSeq from Ledgers ORDER BY LedgerSeq) L
JOIN Validations V
ON (V.LedgerHash = L.LedgerHash)
GROUP BY L.LedgerHash
HAVING validations >= :validation_quorum
ORDER BY 2;
"""
_DATABASE_NAME = 'ledger.db'
USE_PLACEHOLDERS = False
class DatabaseReader(object):
def __init__(self, config):
assert ARGS.database != ARGS.NONE
database = ARGS.database or config['database_path']
if not database.endswith(_DATABASE_NAME):
database = os.path.join(database, _DATABASE_NAME)
if USE_PLACEHOLDERS:
cursor = Database.fetchall(
database, COMPLETE_QUERY, config)
else:
cursor = Database.fetchall(
database, LEDGER_QUERY.format(**config), {})
self.complete = [c[1] for c in cursor]
def name_to_ledger_index(self, ledger_name, is_full=False):
if not self.complete:
return None
if ledger_name == 'closed':
return self.complete[-1]
if ledger_name == 'current':
return None
if ledger_name == 'validated':
return self.complete[-1]
def get_ledger(self, name, is_full=False):
cmd = ['ledger', str(name)]
if is_full:
cmd.append('full')
response = self._command(*cmd)
result = response.get('ledger')
if result:
return result
error = response['error']
etext = _ERROR_TEXT.get(error)
if etext:
error = '%s (%s)' % (etext, error)
Log.fatal(_ERROR_TEXT.get(error, error))


@@ -0,0 +1,18 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.util import Range
FIRST_EVER = 32570
LEDGERS = {
'closed': 'the most recently closed ledger',
'current': 'the current ledger',
'first': 'the first complete ledger on this server',
'last': 'the last complete ledger on this server',
'validated': 'the most recently validated ledger',
}
HELP = """
Ledgers are either represented by a number, or one of the special ledgers;
""" + ',\n'.join('%s, %s' % (k, v) for k, v in sorted(LEDGERS.items())
)


@@ -0,0 +1,68 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import subprocess
from ripple.ledger.Args import ARGS
from ripple.util import File
from ripple.util import Log
from ripple.util import Range
_ERROR_CODE_REASON = {
62: 'No rippled server is running.',
}
_ERROR_TEXT = {
'lgrNotFound': 'The ledger you requested was not found.',
'noCurrent': 'The server has no current ledger.',
'noNetwork': 'The server did not respond to your request.',
}
_DEFAULT_ERROR_ = "Couldn't connect to server."
class RippledReader(object):
def __init__(self, config):
fname = File.normalize(ARGS.rippled)
if not os.path.exists(fname):
raise Exception('No rippled found at %s.' % fname)
self.cmd = [fname]
if ARGS.config:
self.cmd.extend(['--conf', File.normalize(ARGS.config)])
self.info = self._command('server_info')['info']
c = self.info.get('complete_ledgers')
if c == 'empty':
self.complete = []
else:
self.complete = sorted(Range.from_string(c))
def name_to_ledger_index(self, ledger_name, is_full=False):
return self.get_ledger(ledger_name, is_full)['ledger_index']
def get_ledger(self, name, is_full=False):
cmd = ['ledger', str(name)]
if is_full:
cmd.append('full')
response = self._command(*cmd)
result = response.get('ledger')
if result:
return result
error = response['error']
etext = _ERROR_TEXT.get(error)
if etext:
error = '%s (%s)' % (etext, error)
Log.fatal(_ERROR_TEXT.get(error, error))
def _command(self, *cmds):
cmd = self.cmd + list(cmds)
try:
data = subprocess.check_output(cmd, stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
raise Exception(_ERROR_CODE_REASON.get(
e.returncode, _DEFAULT_ERROR_))
part = json.loads(data)
try:
return part['result']
except:
raise ValueError(part.get('error', 'unknown error'))


@@ -0,0 +1,24 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from ripple.ledger.Args import ARGS
from ripple.util import Log
from ripple.util import Range
from ripple.util import Search
def search(server):
"""Yields a stream of ledger numbers that match the given condition."""
condition = lambda number: ARGS.condition(server, number)
ledgers = server.ledgers
if ARGS.binary:
try:
position = Search.FIRST if ARGS.position == 'first' else Search.LAST
yield Search.binary_search(
ledgers[0], ledgers[-1], condition, position)
except:
Log.fatal('No ledgers matching condition "%s".' % condition,
file=sys.stderr)
else:
for x in Search.linear_search(ledgers, condition):
yield x


@@ -0,0 +1,55 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
from ripple.ledger import DatabaseReader, RippledReader
from ripple.ledger.Args import ARGS
from ripple.util.FileCache import FileCache
from ripple.util import ConfigFile
from ripple.util import File
from ripple.util import Range
class Server(object):
def __init__(self):
cfg_file = File.normalize(ARGS.config or 'rippled.cfg')
self.config = ConfigFile.read(open(cfg_file))
if ARGS.database != ARGS.NONE:
reader = DatabaseReader.DatabaseReader(self.config)
else:
reader = RippledReader.RippledReader(self.config)
self.reader = reader
self.complete = reader.complete
names = {
'closed': reader.name_to_ledger_index('closed'),
'current': reader.name_to_ledger_index('current'),
'validated': reader.name_to_ledger_index('validated'),
'first': self.complete[0] if self.complete else None,
'last': self.complete[-1] if self.complete else None,
}
self.__dict__.update(names)
self.ledgers = sorted(Range.join_ranges(*ARGS.ledgers, **names))
def make_cache(is_full):
name = 'full' if is_full else 'summary'
filepath = os.path.join(ARGS.cache, name)
creator = lambda n: reader.get_ledger(n, is_full)
return FileCache(filepath, creator)
self._caches = [make_cache(False), make_cache(True)]
def info(self):
return self.reader.info
def cache(self, is_full):
return self._caches[is_full]
def get_ledger(self, number, is_full=False):
num = int(number)
save_in_cache = num in self.complete
can_create = (not ARGS.offline and
self.complete and
self.complete[0] <= num - 1)
cache = self.cache(is_full)
return cache.get_data(number, save_in_cache, can_create)


@@ -0,0 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals
class ServerReader(object):
def __init__(self, config):
raise ValueError('Direct server connections are not yet implemented.')



@@ -0,0 +1,34 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.ledger.Args import ARGS
from ripple.util import Log
from ripple.util import Range
from ripple.util.PrettyPrint import pretty_print
SAFE = True
HELP = """cache
return server_info"""
def cache(server, clear=False):
cache = server.cache(ARGS.full)
name = ['summary', 'full'][ARGS.full]
files = cache.file_count()
if not files:
Log.error('No files in %s cache.' % name)
elif clear:
if not clear.strip() == 'clear':
raise Exception("Don't understand 'clear %s'." % clear)
if not ARGS.yes:
yes = raw_input('OK to clear %s cache? (y/N) ' % name)
if not yes.lower().startswith('y'):
Log.out('Cancelled.')
return
cache.clear(ARGS.full)
Log.out('%s cache cleared - %d file%s deleted.' %
(name.capitalize(), files, '' if files == 1 else 's'))
else:
caches = (int(c) for c in cache.cache_list())
Log.out(Range.to_string(caches))


@@ -0,0 +1,21 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.ledger.Args import ARGS
from ripple.util import Log
from ripple.util import Range
from ripple.util.PrettyPrint import pretty_print
SAFE = True
HELP = 'info - return server_info'
def info(server):
Log.out('first =', server.first)
Log.out('last =', server.last)
Log.out('closed =', server.closed)
Log.out('current =', server.current)
Log.out('validated =', server.validated)
Log.out('complete =', Range.to_string(server.complete))
if ARGS.full:
Log.out(pretty_print(server.info()))


@@ -0,0 +1,15 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.ledger.Args import ARGS
from ripple.ledger import SearchLedgers
import json
SAFE = True
HELP = """print
Print the ledgers to stdout. The default command."""
def run_print(server):
ARGS.display(print, server, SearchLedgers.search(server))



@@ -0,0 +1,4 @@
from __future__ import absolute_import, division, print_function, unicode_literals
def all_ledgers(server, ledger_number):
return True


@@ -0,0 +1,89 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from functools import wraps
import jsonpath_rw
from ripple.ledger.Args import ARGS
from ripple.util import Dict
from ripple.util import Log
from ripple.util import Range
from ripple.util.Decimal import Decimal
from ripple.util.PrettyPrint import pretty_print, Streamer
TRANSACT_FIELDS = (
'accepted',
'close_time_human',
'closed',
'ledger_index',
'total_coins',
'transactions',
)
LEDGER_FIELDS = (
'accepted',
'accountState',
'close_time_human',
'closed',
'ledger_index',
'total_coins',
'transactions',
)
def _dict_filter(d, keys):
return dict((k, v) for (k, v) in d.items() if k in keys)
def ledger_number(print, server, numbers):
print(Range.to_string(numbers))
def display(f):
@wraps(f)
def wrapper(printer, server, numbers, *args):
streamer = Streamer(printer=printer)
for number in numbers:
ledger = server.get_ledger(number, ARGS.full)
if ledger:
streamer.add(number, f(ledger, *args))
streamer.finish()
return wrapper
def extractor(f):
@wraps(f)
def wrapper(printer, server, numbers, *paths):
try:
find = jsonpath_rw.parse('|'.join(paths)).find
except:
raise ValueError("Can't understand jsonpath '%s'." % path)
def fn(ledger, *args):
return f(find(ledger), *args)
display(fn)(printer, server, numbers)
return wrapper
@display
def ledger(ledger, full=False):
if ARGS.full:
if full:
return ledger
ledger = Dict.prune(ledger, 1, False)
return _dict_filter(ledger, LEDGER_FIELDS)
@display
def prune(ledger, level=1):
return Dict.prune(ledger, level, False)
@display
def transact(ledger):
return _dict_filter(ledger, TRANSACT_FIELDS)
@extractor
def extract(finds):
return dict((str(f.full_path), str(f.value)) for f in finds)
@extractor
def sum(finds):
d = Decimal()
for f in finds:
d.accumulate(f.value)
return [str(d), len(finds)]

40
bin/ripple/util/Cache.py Normal file

@@ -0,0 +1,40 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import defaultdict
class Cache(object):
def __init__(self):
self._value_to_index = {}
self._index_to_value = []
def value_to_index(self, value, **kwds):
index = self._value_to_index.get(value, None)
if index is None:
index = len(self._index_to_value)
self._index_to_value.append((value, kwds))
self._value_to_index[value] = index
return index
def index_to_value(self, index):
return self._index_to_value[index]
def NamedCache():
return defaultdict(Cache)
def cache_by_key(d, keyfunc=None, exclude=None):
cache = defaultdict(Cache)
exclude = exclude or None
keyfunc = keyfunc or (lambda x: x)
def visit(item):
if isinstance(item, list):
for i, x in enumerate(item):
item[i] = visit(x)
elif isinstance(item, dict):
for k, v in item.items():
item[k] = visit(v)
return item
return cache
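A short usage sketch of the interning Cache above (the values are illustrative):
c = Cache()
print(c.value_to_index('alice'))   # 0, the first value gets index 0
print(c.value_to_index('bob'))     # 1
print(c.value_to_index('alice'))   # 0, a repeated value reuses its index
print(c.index_to_value(1))         # ('bob', {})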


@@ -0,0 +1,77 @@
from __future__ import absolute_import, division, print_function, unicode_literals
# Code taken from github/rec/grit.
import os
import sys
from collections import namedtuple
from ripple.ledger.Args import ARGS
from ripple.util import Log
Command = namedtuple('Command', 'function help safe')
def make_command(module):
name = module.__name__.split('.')[-1].lower()
return name, Command(getattr(module, name, None) or
getattr(module, 'run_' + name),
getattr(module, 'HELP'),
getattr(module, 'SAFE', False))
class CommandList(object):
def __init__(self, *args, **kwds):
self.registry = {}
self.register(*args, **kwds)
def register(self, *modules, **kwds):
for module in modules:
name, command = make_command(module)
self.registry[name] = command
for k, v in kwds.items():
if not isinstance(v, (list, tuple)):
v = [v]
self.register_one(k, *v)
def keys(self):
return self.registry.keys()
def register_one(self, name, function, help='', safe=False):
assert name not in self.registry
self.registry[name] = Command(function, help, safe)
def _get(self, command):
command = command.lower()
c = self.registry.get(command)
if c:
return command, c
commands = [c for c in self.registry if c.startswith(command)]
if len(commands) == 1:
command = commands[0]
return command, self.registry[command]
if not commands:
raise ValueError('No such command: %s. Commands are %s.' %
(command, ', '.join(sorted(self.registry))))
if len(commands) > 1:
raise ValueError('Command %s was ambiguous: %s.' %
(command, ', '.join(commands)))
def get(self, command):
return self._get(command)[1]
def run(self, command, *args):
return self.get(command).function(*args)
def run_safe(self, command, *args):
name, cmd = self._get(command)
if not (ARGS.yes or cmd.safe):
confirm = raw_input('OK to execute "rl %s %s"? (y/N) ' %
(name, ' '.join(args)))
if not confirm.lower().startswith('y'):
Log.error('Cancelled.')
return
cmd.function(*args)
def help(self, command):
return self.get(command).help()
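A small sketch of the registry and prefix matching above (the command name and function are made up for illustration):
commands = CommandList()
commands.register_one('status', lambda: 'ok', help='status - print ok', safe=True)
print(commands.run('stat'))   # 'ok', an unambiguous prefix is accepted
# An ambiguous or unknown prefix raises ValueError from _get().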


@@ -0,0 +1,54 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import json
"""Ripple has a proprietary format for their .cfg files, so we need a reader for
them."""
def read(lines):
sections = []
section = []
for line in lines:
line = line.strip()
if (not line) or line[0] == '#':
continue
if line.startswith('['):
if section:
sections.append(section)
section = []
section.append(line)
if section:
sections.append(section)
result = {}
for section in sections:
option = section.pop(0)
assert section, ('No value for option "%s".' % option)
assert option.startswith('[') and option.endswith(']'), (
'No option name in block "%s"' % p[0])
option = option[1:-1]
assert option not in result, 'Duplicate option "%s".' % option
subdict = {}
items = []
for part in section:
if '=' in part:
assert not items, 'Dictionary mixed with list.'
k, v = part.split('=', 1)
assert k not in subdict, 'Repeated dictionary entry ' + k
subdict[k] = v
else:
assert not subdict, 'List mixed with dictionary.'
if part.startswith('{'):
items.append(json.loads(part))
else:
words = part.split()
if len(words) > 1:
items.append(words)
else:
items.append(part)
if len(items) == 1:
result[option] = items[0]
else:
result[option] = items or subdict
return result
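A sketch of what the reader above produces for a small rippled.cfg-style snippet (section names chosen for illustration; read is the function defined above):
text = '''
[node_size]
medium

[ips]
r.ripple.com 51235
'''
print(read(text.splitlines()))
# roughly {'node_size': 'medium', 'ips': ['r.ripple.com', '51235']}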


@@ -0,0 +1,12 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import sqlite3
def fetchall(database, query, kwds):
conn = sqlite3.connect(database)
try:
cursor = conn.execute(query, kwds)
return cursor.fetchall()
finally:
conn.close()


@@ -0,0 +1,46 @@
from __future__ import absolute_import, division, print_function, unicode_literals
"""Fixed point numbers."""
POSITIONS = 10
POSITIONS_SHIFT = 10 ** POSITIONS
class Decimal(object):
def __init__(self, desc='0'):
if isinstance(desc, int):
self.value = desc
return
if desc.startswith('-'):
sign = -1
desc = desc[1:]
else:
sign = 1
parts = desc.split('.')
if len(parts) == 1:
parts.append('0')
elif len(parts) > 2:
raise Exception('Too many decimals in "%s"' % desc)
number, decimal = parts
# Fix the number of positions.
decimal = (decimal + POSITIONS * '0')[:POSITIONS]
self.value = sign * int(number + decimal)
def accumulate(self, item):
if not isinstance(item, Decimal):
item = Decimal(item)
self.value += item.value
def __str__(self):
if self.value >= 0:
sign = ''
value = self.value
else:
sign = '-'
value = -self.value
number = value // POSITIONS_SHIFT
decimal = (value % POSITIONS_SHIFT) * POSITIONS_SHIFT
if decimal:
return '%s%s.%s' % (sign, number, str(decimal).rstrip('0'))
else:
return '%s%s' % (sign, number)
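A quick sketch of the fixed-point behaviour above (the values are illustrative):
d = Decimal('3.25')
print(d.value)        # 32500000000, i.e. 3.25 scaled by 10**POSITIONS
d.accumulate('0.75')
print(d)              # 4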

33
bin/ripple/util/Dict.py Normal file

@@ -0,0 +1,33 @@
from __future__ import absolute_import, division, print_function, unicode_literals
def count_all_subitems(x):
"""Count the subitems of a Python object, including the object itself."""
if isinstance(x, list):
return 1 + sum(count_all_subitems(i) for i in x)
if isinstance(x, dict):
return 1 + sum(count_all_subitems(i) for i in x.itervalues())
return 1
def prune(item, level, count_recursively=True):
def subitems(x):
i = count_all_subitems(x) - 1 if count_recursively else len(x)
return '1 subitem' if i == 1 else '%d subitems' % i
assert level >= 0
if not item:
return item
if isinstance(item, list):
if level:
return [prune(i, level - 1, count_recursively) for i in item]
else:
return '[list with %s]' % subitems(item)
if isinstance(item, dict):
if level:
return dict((k, prune(v, level - 1, count_recursively))
for k, v in item.iteritems())
else:
return '{dict with %s}' % subitems(item)
return item
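A sketch of prune() under Python 2 (the dict is illustrative):
doc = {'a': {'b': [1, 2, 3]}, 'c': 7}
print(prune(doc, 1))
# {'a': '{dict with 4 subitems}', 'c': 7}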

7
bin/ripple/util/File.py Normal file

@@ -0,0 +1,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import os
def normalize(f):
f = os.path.join(*f.split('/')) # For Windows users.
return os.path.abspath(os.path.expanduser(f))


@@ -0,0 +1,56 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import gzip
import json
import os
_NONE = object()
class FileCache(object):
"""A two-level cache, which stores expensive results in memory and on disk.
"""
def __init__(self, cache_directory, creator, open=gzip.open, suffix='.gz'):
self.cache_directory = cache_directory
self.creator = creator
self.open = open
self.suffix = suffix
self.cached_data = {}
if not os.path.exists(self.cache_directory):
os.makedirs(self.cache_directory)
def get_file_data(self, name):
if os.path.exists(filename):
return json.load(self.open(filename))
result = self.creator(name)
return result
def get_data(self, name, save_in_cache, can_create, default=None):
name = str(name)
result = self.cached_data.get(name, _NONE)
if result is _NONE:
filename = os.path.join(self.cache_directory, name) + self.suffix
if os.path.exists(filename):
result = json.load(self.open(filename)) or _NONE
if result is _NONE and can_create:
result = self.creator(name)
if save_in_cache:
json.dump(result, self.open(filename, 'w'))
return default if result is _NONE else result
def _files(self):
return os.listdir(self.cache_directory)
def cache_list(self):
for f in self._files():
if f.endswith(self.suffix):
yield f[:-len(self.suffix)]
def file_count(self):
return len(self._files())
def clear(self):
"""Clears both local files and memory."""
self.cached_data = {}
for f in self._files():
os.remove(os.path.join(self.cache_directory, f))
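A sketch of intended use, assuming the class lives at ripple.util.FileCache (the creator callback and cache path below are made up for illustration): results are created once, written to a gzipped JSON file, and served from the cache afterwards.

    from ripple.util.FileCache import FileCache

    def fetch_ledger(name):
        # Expensive work goes here; the result must be JSON-serializable.
        return {'ledger_index': name}

    cache = FileCache('/tmp/ledger-cache', fetch_ledger)
    first = cache.get_data(8252899, save_in_cache=True, can_create=True)
    again = cache.get_data(8252899, save_in_cache=True, can_create=True)  # cached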

View File

@@ -0,0 +1,82 @@
from __future__ import absolute_import, division, print_function, unicode_literals
"""A function that can be specified at the command line, with an argument."""
import importlib
import re
import tokenize
from StringIO import StringIO
MATCHER = re.compile(r'([\w.]+)(.*)')
REMAPPINGS = {
'false': False,
'true': True,
'null': None,
'False': False,
'True': True,
'None': None,
}
def eval_arguments(args):
args = args.strip()
if not args or (args == '()'):
return ()
tokens = list(tokenize.generate_tokens(StringIO(args).readline))
def remap():
for type, name, _, _, _ in tokens:
if type == tokenize.NAME and name not in REMAPPINGS:
yield tokenize.STRING, '"%s"' % name
else:
yield type, name
untok = tokenize.untokenize(remap())
if untok[1:-1].strip():
untok = untok[:-1] + ',)' # Force a tuple.
try:
return eval(untok, REMAPPINGS)
except Exception as e:
raise ValueError('Couldn\'t evaluate expression "%s" (became "%s"), '
'error "%s"' % (args, untok, str(e)))
class Function(object):
def __init__(self, desc='', default_path=''):
self.desc = desc.strip()
if not self.desc:
# Make an empty function that does nothing.
self.args = ()
self.function = lambda *args, **kwds: None
return
m = MATCHER.match(desc)
if not m:
raise ValueError('"%s" is not a function' % desc)
self.function, self.args = (g.strip() for g in m.groups())
self.args = eval_arguments(self.args)
if '.' not in self.function:
if default_path and not default_path.endswith('.'):
default_path += '.'
self.function = default_path + self.function
p, m = self.function.rsplit('.', 1)
try:
mod = importlib.import_module(p)
except ImportError:
raise ValueError('Can\'t find Python module "%s"' % p)
try:
self.function = getattr(mod, m)
except:
raise ValueError('No function "%s" in module "%s"' % (m, p))
def __str__(self):
return self.desc
def __call__(self, *args, **kwds):
return self.function(*(args + self.args), **kwds)
def __eq__(self, other):
return self.function == other.function and self.args == other.args
def __ne__(self, other):
return not (self == other)
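
For illustration, a dotted name with an argument list resolves to a real callable; arguments supplied at call time are passed first, followed by the stored ones, bare words are quoted, and true/false/null map to Python values (FN is the helper defined in the test file later in this diff):

    from ripple.util.Function import Function

    f = Function('math.pow(10)')
    print(f(2))   # calls math.pow(2, 10) -> 1024.0

    g = Function('ripple.util.test_Function.FN(testing, true, null)')
    print(g())    # (('testing', True, None), {})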

21
bin/ripple/util/Log.py Normal file
View File

@@ -0,0 +1,21 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
VERBOSE = False
def out(*args, **kwds):
printer = kwds.pop('print', print)
kwds.setdefault('file', sys.stdout)
printer(*args, **kwds)
def info(*args, **kwds):
if VERBOSE:
out(*args, **kwds)
def warn(*args, **kwds):
out('WARNING:', *args, **kwds)
def error(*args, **kwds):
out('ERROR:', *args, **kwds)
def fatal(*args, **kwds):
raise Exception('FATAL: ' + ' '.join(str(a) for a in args))

View File

@@ -0,0 +1,42 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from functools import wraps
import json
SEPARATORS = ',', ': '
INDENT = ' '
def pretty_print(item):
return json.dumps(item,
sort_keys=True,
indent=len(INDENT),
separators=SEPARATORS)
class Streamer(object):
def __init__(self, printer=print):
# No automatic spacing or carriage returns.
self.printer = lambda *args: printer(*args, end='', sep='')
self.first_key = True
def add(self, key, value):
if self.first_key:
self.first_key = False
self.printer('{')
else:
self.printer(',')
self.printer('\n', INDENT, '"', str(key), '": ')
pp = pretty_print(value).splitlines()
if len(pp) > 1:
for i, line in enumerate(pp):
if i > 0:
self.printer('\n', INDENT)
self.printer(line)
else:
self.printer(pp[0])
def finish(self):
if not self.first_key:
self.first_key = True
self.printer('\n}')
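
A minimal sketch of the streaming printer: keys are emitted one at a time, so a large JSON object can be written incrementally instead of being built in memory first.

    from ripple.util.PrettyPrint import Streamer

    s = Streamer()            # wraps print() with sep='' and end=''
    s.add('foo', 'bar')       # emits the opening '{' and the first key
    s.add('baz', [23, 42])    # later keys are preceded by a comma
    s.finish()                # closes the object with '\n}'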

53
bin/ripple/util/Range.py Normal file
View File

@@ -0,0 +1,53 @@
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Convert a discontiguous range of integers to and from a human-friendly form.
Real world example is the server_info.complete_ledgers:
8252899-8403772,8403824,8403827-8403830,8403834-8403876
"""
def from_string(desc, **aliases):
if not desc:
return []
result = set()
for d in desc.split(','):
nums = [int(aliases.get(x) or x) for x in d.split('-')]
if len(nums) == 1:
result.add(nums[0])
elif len(nums) == 2:
result.update(range(nums[0], nums[1] + 1))
return result
def to_string(r):
groups = []
next_group = []
for i, x in enumerate(sorted(r)):
if next_group and (x - next_group[-1]) > 1:
groups.append(next_group)
next_group = []
next_group.append(x)
if next_group:
groups.append(next_group)
def display(g):
if len(g) == 1:
return str(g[0])
else:
return '%s-%s' % (g[0], g[-1])
return ','.join(display(g) for g in groups)
def is_range(desc, *names):
try:
from_string(desc, **dict((n, 1) for n in names))
return True
except ValueError:
return False
def join_ranges(*ranges, **aliases):
result = set()
for r in ranges:
result.update(from_string(r, **aliases))
return result
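
Tying this back to the complete_ledgers example in the docstring, a quick sketch:

    from ripple.util import Range

    ledgers = Range.from_string('8252899-8252901,8403824')
    # -> set([8252899, 8252900, 8252901, 8403824])
    print(Range.to_string(ledgers))                # '8252899-8252901,8403824'
    print(Range.from_string('first-5', first=1))   # set([1, 2, 3, 4, 5])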

46
bin/ripple/util/Search.py Normal file
View File

@@ -0,0 +1,46 @@
from __future__ import absolute_import, division, print_function, unicode_literals
FIRST, LAST = range(2)
def binary_search(begin, end, condition, location=FIRST):
"""Search for an i in the interval [begin, end] where condition(i) is true.
If location is FIRST, return the first such i.
If location is LAST, return the last such i.
If there is no such i, then throw an exception.
"""
b = condition(begin)
e = condition(end)
if b and e:
return begin if location == FIRST else end
if not (b or e):
raise ValueError('%d/%d' % (begin, end))
if b and location is FIRST:
return begin
if e and location is LAST:
return end
width = end - begin + 1
if width == 1:
if not b:
raise ValueError('%d/%d' % (begin, end))
return begin
if width == 2:
return begin if b else end
mid = (begin + end) // 2
m = condition(mid)
if m == b:
return binary_search(mid, end, condition, location)
else:
return binary_search(begin, mid, condition, location)
def linear_search(items, condition):
"""Yields each i in the interval [begin, end] where condition(i) is true.
"""
for i in items:
if condition(i):
yield i
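
The binary search assumes the condition is true on a single contiguous run inside [begin, end]; FIRST and LAST select which end of that run to return. A sketch, reusing the window from the tests later in this diff:

    from ripple.util.Search import binary_search, FIRST, LAST

    def in_window(i):
        return 10 <= i < 15

    print(binary_search(0, 14, in_window, FIRST))   # 10
    print(binary_search(10, 20, in_window, LAST))   # 14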

21
bin/ripple/util/Time.py Normal file
View File

@@ -0,0 +1,21 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
# Format for human-readable dates in rippled
_DATE_FORMAT = '%Y-%b-%d'
_TIME_FORMAT = '%H:%M:%S'
_DATETIME_FORMAT = '%s %s' % (_DATE_FORMAT, _TIME_FORMAT)
_FORMATS = _DATE_FORMAT, _TIME_FORMAT, _DATETIME_FORMAT
def parse_datetime(desc):
for fmt in _FORMATS:
try:
return datetime.datetime.strptime(desc, fmt)
except ValueError:
pass
raise ValueError("Can't understand date '%s'." % desc)
def format_datetime(dt):
return dt.strftime(_DATETIME_FORMAT)
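
Parsing and formatting round-trip through rippled's human-readable date format, for example:

    from ripple.util import Time

    dt = Time.parse_datetime('2015-Jan-01 12:30:00')
    print(Time.format_datetime(dt))   # '2015-Jan-01 12:30:00'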

View File

View File

@@ -0,0 +1,12 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.util.Cache import NamedCache
from unittest import TestCase
class test_Cache(TestCase):
def setUp(self):
self.cache = NamedCache()
def test_trivial(self):
pass

View File

@@ -0,0 +1,163 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.util import ConfigFile
from unittest import TestCase
class test_ConfigFile(TestCase):
def test_trivial(self):
self.assertEquals(ConfigFile.read(''), {})
def test_full(self):
self.assertEquals(ConfigFile.read(FULL.splitlines()), RESULT)
RESULT = {
'websocket_port': '6206',
'database_path': '/development/alpha/db',
'sntp_servers':
['time.windows.com', 'time.apple.com', 'time.nist.gov', 'pool.ntp.org'],
'validation_seed': 'sh1T8T9yGuV7Jb6DPhqSzdU2s5LcV',
'node_size': 'medium',
'rpc_startup': {
'command': 'log_level',
'severity': 'debug'},
'ips': ['r.ripple.com', '51235'],
'node_db': {
'file_size_mult': '2',
'file_size_mb': '8',
'cache_mb': '256',
'path': '/development/alpha/db/rocksdb',
'open_files': '2000',
'type': 'RocksDB',
'filter_bits': '12'},
'peer_port': '53235',
'ledger_history': 'full',
'rpc_ip': '127.0.0.1',
'websocket_public_ip': '0.0.0.0',
'rpc_allow_remote': '0',
'validators':
[['n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7', 'RL1'],
['n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj', 'RL2'],
['n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C', 'RL3'],
['n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS', 'RL4'],
['n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA', 'RL5']],
'debug_logfile': '/development/alpha/debug.log',
'websocket_public_port': '5206',
'peer_ip': '0.0.0.0',
'rpc_port': '5205',
'validation_quorum': '3',
'websocket_ip': '127.0.0.1'}
FULL = """
[ledger_history]
full
# Allow other peers to connect to this server.
#
[peer_ip]
0.0.0.0
[peer_port]
53235
# Allow untrusted clients to connect to this server.
#
[websocket_public_ip]
0.0.0.0
[websocket_public_port]
5206
# Provide trusted websocket ADMIN access to the localhost.
#
[websocket_ip]
127.0.0.1
[websocket_port]
6206
# Provide trusted json-rpc ADMIN access to the localhost.
#
[rpc_ip]
127.0.0.1
[rpc_port]
5205
[rpc_allow_remote]
0
[node_size]
medium
# This is primary persistent datastore for rippled. This includes transaction
# metadata, account states, and ledger headers. Helpful information can be
# found here: https://ripple.com/wiki/NodeBackEnd
[node_db]
type=RocksDB
path=/development/alpha/db/rocksdb
open_files=2000
filter_bits=12
cache_mb=256
file_size_mb=8
file_size_mult=2
[database_path]
/development/alpha/db
# This needs to be an absolute directory reference, not a relative one.
# Modify this value as required.
[debug_logfile]
/development/alpha/debug.log
[sntp_servers]
time.windows.com
time.apple.com
time.nist.gov
pool.ntp.org
# Where to find some other servers speaking the Ripple protocol.
#
[ips]
r.ripple.com 51235
# The latest validators can be obtained from
# https://ripple.com/ripple.txt
#
[validators]
n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7 RL1
n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj RL2
n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C RL3
n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS RL4
n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA RL5
# Ditto.
[validation_quorum]
3
[validation_seed]
sh1T8T9yGuV7Jb6DPhqSzdU2s5LcV
# Turn down default logging to save disk space in the long run.
# Valid values here are trace, debug, info, warning, error, and fatal
[rpc_startup]
{ "command": "log_level", "severity": "debug" }
# Configure SSL for WebSockets. Not enabled by default because not everybody
# has an SSL cert on their server, but if you uncomment the following lines and
# set the path to the SSL certificate and private key the WebSockets protocol
# will be protected by SSL/TLS.
#[websocket_secure]
#1
#[websocket_ssl_cert]
#/etc/ssl/certs/server.crt
#[websocket_ssl_key]
#/etc/ssl/private/server.key
# Defaults to 0 ("no") so that you can use self-signed SSL certificates for
# development, or internally.
#[ssl_verify]
#0
""".strip()

View File

@@ -0,0 +1,20 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.util.Decimal import Decimal
from unittest import TestCase
class test_Decimal(TestCase):
def test_construct(self):
self.assertEquals(str(Decimal('')), '0')
self.assertEquals(str(Decimal('0')), '0')
self.assertEquals(str(Decimal('0.2')), '0.2')
self.assertEquals(str(Decimal('-0.2')), '-0.2')
self.assertEquals(str(Decimal('3.1416')), '3.1416')
def test_accumulate(self):
d = Decimal()
d.accumulate('0.5')
d.accumulate('3.1416')
d.accumulate('-23.34234')
self.assertEquals(str(d), '-19.70074')

View File

@@ -0,0 +1,56 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.util import Dict
from unittest import TestCase
class test_Dict(TestCase):
def test_count_all_subitems(self):
self.assertEquals(Dict.count_all_subitems({}), 1)
self.assertEquals(Dict.count_all_subitems({'a': {}}), 2)
self.assertEquals(Dict.count_all_subitems([1]), 2)
self.assertEquals(Dict.count_all_subitems([1, 2]), 3)
self.assertEquals(Dict.count_all_subitems([1, {2: 3}]), 4)
self.assertEquals(Dict.count_all_subitems([1, {2: [3]}]), 5)
self.assertEquals(Dict.count_all_subitems([1, {2: [3, 4]}]), 6)
def test_prune(self):
self.assertEquals(Dict.prune({}, 0), {})
self.assertEquals(Dict.prune({}, 1), {})
self.assertEquals(Dict.prune({1: 2}, 0), '{dict with 1 subitem}')
self.assertEquals(Dict.prune({1: 2}, 1), {1: 2})
self.assertEquals(Dict.prune({1: 2}, 2), {1: 2})
self.assertEquals(Dict.prune([1, 2, 3], 0), '[list with 3 subitems]')
self.assertEquals(Dict.prune([1, 2, 3], 1), [1, 2, 3])
self.assertEquals(Dict.prune([{1: [2, 3]}], 0),
'[list with 4 subitems]')
self.assertEquals(Dict.prune([{1: [2, 3]}], 1),
['{dict with 3 subitems}'])
self.assertEquals(Dict.prune([{1: [2, 3]}], 2),
[{1: u'[list with 2 subitems]'}])
self.assertEquals(Dict.prune([{1: [2, 3]}], 3),
[{1: [2, 3]}])
def test_prune_nosub(self):
self.assertEquals(Dict.prune({}, 0, False), {})
self.assertEquals(Dict.prune({}, 1, False), {})
self.assertEquals(Dict.prune({1: 2}, 0, False), '{dict with 1 subitem}')
self.assertEquals(Dict.prune({1: 2}, 1, False), {1: 2})
self.assertEquals(Dict.prune({1: 2}, 2, False), {1: 2})
self.assertEquals(Dict.prune([1, 2, 3], 0, False),
'[list with 3 subitems]')
self.assertEquals(Dict.prune([1, 2, 3], 1, False), [1, 2, 3])
self.assertEquals(Dict.prune([{1: [2, 3]}], 0, False),
'[list with 1 subitem]')
self.assertEquals(Dict.prune([{1: [2, 3]}], 1, False),
['{dict with 1 subitem}'])
self.assertEquals(Dict.prune([{1: [2, 3]}], 2, False),
[{1: u'[list with 2 subitems]'}])
self.assertEquals(Dict.prune([{1: [2, 3]}], 3, False),
[{1: [2, 3]}])

View File

@@ -0,0 +1,37 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.util.Function import Function, MATCHER
from unittest import TestCase
def FN(*args, **kwds):
return args, kwds
class test_Function(TestCase):
def match_test(self, item, *results):
self.assertEquals(MATCHER.match(item).groups(), results)
def test_simple(self):
self.match_test('function', 'function', '')
self.match_test('f(x)', 'f', '(x)')
def test_empty_function(self):
self.assertEquals(Function()(), None)
def test_empty_args(self):
f = Function('ripple.util.test_Function.FN()')
self.assertEquals(f(), ((), {}))
def test_function(self):
f = Function('ripple.util.test_Function.FN(True, {1: 2}, None)')
self.assertEquals(f(), ((True, {1: 2}, None), {}))
self.assertEquals(f('hello', foo='bar'),
(('hello', True, {1: 2}, None), {'foo':'bar'}))
self.assertEquals(
f, Function('ripple.util.test_Function.FN(true, {1: 2}, null)'))
def test_quoting(self):
f = Function('ripple.util.test_Function.FN(testing)')
self.assertEquals(f(), (('testing',), {}))
f = Function('ripple.util.test_Function.FN(testing, true, false, null)')
self.assertEquals(f(), (('testing', True, False, None), {}))

View File

@@ -0,0 +1,56 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.util import PrettyPrint
from unittest import TestCase
class test_PrettyPrint(TestCase):
def setUp(self):
self._results = []
self.printer = PrettyPrint.Streamer(printer=self.printer)
def printer(self, *args, **kwds):
self._results.extend(args)
def run_test(self, expected, *args):
for i in range(0, len(args), 2):
self.printer.add(args[i], args[i + 1])
self.printer.finish()
self.assertEquals(''.join(self._results), expected)
def test_simple_printer(self):
self.run_test(
'{\n "foo": "bar"\n}',
'foo', 'bar')
def test_multiple_lines(self):
self.run_test(
'{\n "foo": "bar",\n "baz": 5\n}',
'foo', 'bar', 'baz', 5)
def test_nested_dict(self):
self.run_test(
"""
{
"foo": {
"bar": 1,
"baz": true
},
"bang": "bing"
}
""".strip(), 'foo', {'bar': 1, 'baz': True}, 'bang', 'bing')
def test_multiple_lines_with_list(self):
self.run_test(
"""
{
"foo": [
"bar",
1
],
"baz": [
23,
42
]
}
""".strip(), 'foo', ['bar', 1], 'baz', [23, 42])

View File

@@ -0,0 +1,28 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.util import Range
from unittest import TestCase
class test_Range(TestCase):
def round_trip(self, s, *items):
self.assertEquals(Range.from_string(s), set(items))
self.assertEquals(Range.to_string(items), s)
def test_complete(self):
self.round_trip('10,19', 10, 19)
self.round_trip('10', 10)
self.round_trip('10-12', 10, 11, 12)
self.round_trip('10,19,42-45', 10, 19, 42, 43, 44, 45)
def test_names(self):
self.assertEquals(
Range.from_string('first,last,current', first=1, last=3, current=5),
set([1, 3, 5]))
def test_is_range(self):
self.assertTrue(Range.is_range(''))
self.assertTrue(Range.is_range('10'))
self.assertTrue(Range.is_range('10,12'))
self.assertFalse(Range.is_range('10,12,fred'))
self.assertTrue(Range.is_range('10,12,fred', 'fred'))

View File

@@ -0,0 +1,44 @@
from __future__ import absolute_import, division, print_function, unicode_literals
from ripple.util.Search import binary_search, linear_search, FIRST, LAST
from unittest import TestCase
class test_Search(TestCase):
def condition(self, i):
return 10 <= i < 15
def test_linear_full(self):
self.assertEquals(list(linear_search(range(21), self.condition)),
[10, 11, 12, 13, 14])
def test_linear_partial(self):
self.assertEquals(list(linear_search(range(8, 14), self.condition)),
[10, 11, 12, 13])
self.assertEquals(list(linear_search(range(11, 14), self.condition)),
[11, 12, 13])
self.assertEquals(list(linear_search(range(12, 18), self.condition)),
[12, 13, 14])
def test_linear_empty(self):
self.assertEquals(list(linear_search(range(1, 4), self.condition)), [])
def test_binary_first(self):
self.assertEquals(binary_search(0, 14, self.condition, FIRST), 10)
self.assertEquals(binary_search(10, 19, self.condition, FIRST), 10)
self.assertEquals(binary_search(14, 14, self.condition, FIRST), 14)
self.assertEquals(binary_search(14, 15, self.condition, FIRST), 14)
self.assertEquals(binary_search(13, 15, self.condition, FIRST), 13)
def test_binary_last(self):
self.assertEquals(binary_search(10, 20, self.condition, LAST), 14)
self.assertEquals(binary_search(0, 14, self.condition, LAST), 14)
self.assertEquals(binary_search(14, 14, self.condition, LAST), 14)
self.assertEquals(binary_search(14, 15, self.condition, LAST), 14)
self.assertEquals(binary_search(13, 15, self.condition, LAST), 14)
def test_binary_throws(self):
self.assertRaises(
ValueError, binary_search, 0, 20, self.condition, LAST)
self.assertRaises(
ValueError, binary_search, 0, 20, self.condition, FIRST)

747
bin/six.py Normal file
View File

@@ -0,0 +1,747 @@
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2014 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.7.3"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
# This is a bit ugly, but it avoids running this again.
delattr(obj.__class__, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
else:
def iterkeys(d, **kw):
return iter(d.iterkeys(**kw))
def itervalues(d, **kw):
return iter(d.itervalues(**kw))
def iteritems(d, **kw):
return iter(d.iteritems(**kw))
def iterlists(d, **kw):
return iter(d.iterlists(**kw))
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
def iterbytes(buf):
return (ord(byte) for byte in buf)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped):
def wrapper(f):
f = functools.wraps(wrapped)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)

View File

@@ -2,14 +2,38 @@
# Coding Standards
Coding standards used here are extremely strict and consistent. The style
evolved gradually over the years, incorporating generally acknowledged
best-practice C++ advice, experience, and personal preference.
Coding standards used here gradually evolve and propagate through
code reviews. Some aspects are enforced more strictly than others.
## Don't Repeat Yourself!
## Rules
The [Don't Repeat Yourself][1] principle summarises the essence of what it
means to write good code, in all languages, at all levels.
These rules only apply to our own code. We can't enforce any sort of
style on the external repositories and libraries we include. The best
guideline is to maintain the standards that are used in those libraries.
* Tab inserts 4 spaces. No tab characters.
* Braces are indented in the [Allman style][1].
* Modern C++ principles. No naked ```new``` or ```delete```.
* Line lengths limited to 80 characters. Exceptions limited to data and tables.
## Guidelines
If you want to do something contrary to these guidelines, understand
why you're doing it. Think, use common sense, and consider that your
changes will probably need to be maintained long after you've
moved on to other projects.
* Use white space and blank lines to guide the eye and keep your intent clear.
* Put private data members at the top of a class, and the 6 public special
members immediately after, in the following order:
* Destructor
* Default constructor
* Copy constructor
* Copy assignment
* Move constructor
* Move assignment
* Don't over-inline by defining large functions within the class
declaration, not even for template classes.
## Formatting
@@ -17,9 +41,6 @@ The goal of source code formatting should always be to make things as easy to
read as possible. White space is used to guide the eye so that details are not
overlooked. Blank lines are used to separate code into "paragraphs."
* No tab characters please.
* Tab stops are set to 4 spaces.
* Braces are indented in the [Allman style][2].
* Always place a space before and after all binary operators,
especially assignments (`operator=`).
* The `!` operator should always be followed by a space.
@@ -62,156 +83,4 @@ overlooked. Blank lines are used to separate code into "paragraphs."
* Always place a space in between the template angle brackets and the type
name. Template code is already hard enough to read!
## Naming conventions
* Member variables and method names are written with camel-case, and never
begin with a capital letter.
* Class names are also written in camel-case, but always begin with a capital
letter.
* For global variables... well, you shouldn't have any, so it doesn't matter.
* Class data members begin with `m_`, static data members begin with `s_`.
Global variables begin with `g_`. This is so the scope of the corresponding
declaration can be easily determined.
* Avoid underscores in your names, especially leading or trailing underscores.
In particular, leading underscores should be avoided, as these are often used
in standard library code, so to use them in your own code looks quite jarring.
* If you really have to write a macro for some reason, then make it all caps,
with underscores to separate the words. And obviously make sure that its name
is unlikely to clash with symbols used in other libraries or 3rd party code.
## Types, const-correctness
* If a method can (and should!) be const, make it const!
* If a method definitely doesn't throw an exception (be careful!), mark it as
`noexcept`
* When returning a temporary object, e.g. a String, the returned object should
be non-const, so that if the class has a C++11 move operator, it can be used.
* If a local variable can be const, then make it const!
* Remember that pointers can be const as well as primitives; For example, if
you have a `char*` whose contents are going to be altered, you may still be
able to make the pointer itself const, e.g. `char* const foobar = getFoobar();`.
* Do not declare all your local variables at the top of a function or method
(i.e. in the old-fashioned C-style). Declare them at the last possible moment,
and give them as small a scope as possible.
* Object parameters should be passed as `const&` wherever possible. Only
pass a parameter as a copy-by-value object if you really need to mutate
a local copy inside the method, and if making a local copy inside the method
would be difficult.
* Use portable `for()` loop variable scoping (i.e. do not have multiple for
loops in the same scope that each re-declare the same variable name, as
this fails on older compilers)
* When you're testing a pointer to see if it's null, never write
`if (myPointer)`. Always avoid that implicit cast-to-bool by writing it more
fully: `if (myPointer != nullptr)`. And likewise, never ever write
`if (! myPointer)`, instead always write `if (myPointer == nullptr)`.
It is more readable that way.
* Avoid C-style casts except when converting between primitive numeric types.
Some people would say "avoid C-style casts altogether", but `static_cast` is
a bit unreadable when you just want to cast an `int` to a `float`. But
whenever a pointer is involved, or a non-primitive object, always use
`static_cast`. And when you're reinterpreting data, always use
`reinterpret_cast`.
* Until C++ gets a universal 64-bit primitive type (part of the C++11
standard), it's best to stick to the `int64` and `uint64` typedefs.
## Object lifetime and ownership
* Absolutely do NOT use `delete`, `deleteAndZero`, etc. There are very very few
situations where you can't use a `ScopedPointer` or some other automatic
lifetime management class.
* Do not use `new` unless there's no alternative. Whenever you type `new`, always
treat it as a failure to find a better solution. If a local variable can be
allocated on the stack rather than the heap, then always do so.
* Do not ever use `new` or `malloc` to allocate a C++ array. Always use a
`HeapBlock` instead.
* And just to make it doubly clear: Never use `malloc` or `calloc`.
* If a parent object needs to create and own some kind of child object, always
use composition as your first choice. If that's not possible (e.g. if the
child needs a pointer to the parent for its constructor), then use a
`ScopedPointer`.
* If possible, pass an object as a reference rather than a pointer. If possible,
make it a `const` reference.
* Obviously avoid static and global values. Sometimes there's no alternative,
but if there is an alternative, then use it, no matter how much effort it
involves.
* If allocating a local POD structure (e.g. an operating-system structure in
native code), and you need to initialise it with zeros, use the `= { 0 };`
syntax as your first choice for doing this. If for some reason that's not
appropriate, use the `zerostruct()` function, or in case that isn't suitable,
use `zeromem()`. Don't use `memset()`.
## Classes
* Declare a class's public section first, and put its constructors and
destructor first. Any protected items come next, and then private ones.
* Use the most restrictive access-specifier possible for each member. Prefer
`private` over `protected`, and `protected` over `public`. Don't expose
things unnecessarily.
* Preferred positioning for any inherited classes is to put them to the right
of the class name, vertically aligned, e.g.:
class Thing : public Foo,
private Bar
{
};
* Put a class's member variables (which should almost always be private, of course),
after all the public and protected method declarations.
* Any private methods can go towards the end of the class, after the member
variables.
* If your class does not have copy-by-value semantics, derive the class from
`Uncopyable`.
* If your class is likely to be leaked, then derive your class from
`LeakChecked<>`.
* Constructors that take a single parameter should by default be marked
`explicit`. Obviously there are cases where you do want implicit conversion,
but always think about it carefully before writing a non-explicit constructor.
* Do not use `NULL`, `null`, or 0 for a null-pointer. And especially never use
'0L', which is particularly burdensome. Use `nullptr` instead - this is the
C++2011 standard, so get used to it. There's a fallback definition for `nullptr`
in Beast, so it's always possible to use it even if your compiler isn't yet
C++2011 compliant.
* All the C++ 'guru' books and articles are full of excellent and detailed advice
on when it's best to use inheritance vs composition. If you're not already
familiar with the received wisdom in these matters, then do some reading!
## Miscellaneous
* `goto` statements should not be used at all, even if the alternative is
more verbose code. The only exception is when implementing an algorithm in
a function as a state machine.
* Don't use macros! OK, obviously there are many situations where they're the
right tool for the job, but treat them as a last resort. Certainly don't ever
use a macro just to hold a constant value or to perform any kind of function
that could have been done as a real inline function. And it goes without saying
that you should give them names which aren't going to clash with other code.
And `#undef` them after you've used them, if possible.
* When using the `++` or `--` operators, never use post-increment if
pre-increment could be used instead. Although it doesn't matter for
primitive types, it's good practice to pre-increment since this can be
much more efficient for more complex objects. In particular, if you're
writing a for loop, always use pre-increment,
e.g. `for (int i = 0; i < 10; ++i)`
* Never put an "else" statement after a "return"! This is well-explained in the
LLVM coding standards...and a couple of other very good pieces of advice from
the LLVM standards are in there as well.
* When getting a possibly-null pointer and using it only if it's non-null, limit
the scope of the pointer as much as possible - e.g. Do NOT do this:
Foo* f = getFoo ();
if (f != nullptr)
f->doSomething ();
// other code
f->doSomething (); // oops! f may be null!
...instead, prefer to write it like this, which reduces the scope of the
pointer, making it impossible to write code that accidentally uses a null
pointer:
if (Foo* f = getFoo ())
f->doSomethingElse ();
// f is out-of-scope here, so impossible to use it if it's null
(This also results in smaller, cleaner code)
[1]: http://en.wikipedia.org/wiki/Don%27t_repeat_yourself
[2]: http://en.wikipedia.org/wiki/Indent_style#Allman_style
[1]: http://en.wikipedia.org/wiki/Indent_style#Allman_style

File diff suppressed because it is too large.

63
doc/HeapProfiling.md Normal file
View File

@@ -0,0 +1,63 @@
## Heap profiling of rippled with jemalloc
The jemalloc library provides a good API for doing heap analysis,
including a mechanism to dump a description of the heap from within the
running application via a function call. Details on how to perform this
activity in general, as well as how to acquire the software, are available on
the jemalloc site:
[https://github.com/jemalloc/jemalloc/wiki/Use-Case:-Heap-Profiling](https://github.com/jemalloc/jemalloc/wiki/Use-Case:-Heap-Profiling)
jemalloc is acquired separately from rippled, and is not affiliated
with Ripple Labs. If you compile and install jemalloc from the
source release with default options, it will install the library and header
under `/usr/local/lib` and `/usr/local/include`, respectively. Heap
profiling has been tested with rippled on a Linux platform. It should
work on platforms on which both rippled and jemalloc are available.
To link rippled with jemalloc, pass the argument
`profile-jemalloc=<jemalloc_dir>` after the optional scons target.
The `<jemalloc_dir>` argument should be the same as that of the
`--prefix` parameter passed to the jemalloc configure script when building.
## Examples:
Build rippled with jemalloc library under /usr/local/lib and
header under /usr/local/include:
$ scons profile-jemalloc=/usr/local
Build rippled using clang with the jemalloc library under /opt/local/lib
and header under /opt/local/include:
$ scons clang profile-jemalloc=/opt/local
----------------------
## Using the jemalloc library from within the code
The `profile-jemalloc` parameter enables a macro definition called
`PROFILE_JEMALLOC`. Include the jemalloc header file as
well as the api call(s) that you wish to make within preprocessor
conditional groups, such as:
In global scope:
#ifdef PROFILE_JEMALLOC
#include <jemalloc/jemalloc.h>
#endif
And later, within a function scope:
#ifdef PROFILE_JEMALLOC
mallctl("prof.dump", NULL, NULL, NULL, 0);
#endif
Fuller descriptions of how to acquire and use jemalloc's api to do memory
analysis are available at the [jemalloc
site.](http://www.canonware.com/jemalloc/)
Linking against the jemalloc library will override
the system's default `malloc()` and related functions with jemalloc's
implementation. This is the case even if the code is not instrumented
to use jemalloc's specific API.

File diff suppressed because it is too large.

View File

@@ -10,7 +10,7 @@
},
"dependencies": {
"ripple-lib": "0.7.37",
"ripple-lib": "0.8.2",
"async": "~0.2.9",
"extend": "~1.2.0",
"simple-jsonrpc": "~0.0.2",

View File

@@ -57,18 +57,6 @@
//#define BEAST_FORCE_DEBUG 1
#endif
/** Config: BEAST_LOG_ASSERTIONS
If this flag is enabled, the bassert and bassertfalse macros will always
use Logger::writeToLog() to write a message when an assertion happens.
Enabling it will also leave this turned on in release builds. When it's
disabled, however, the bassert and bassertfalse macros will not be compiled
in a release build.
@see bassert, bassertfalse, Logger
*/
#ifndef BEAST_LOG_ASSERTIONS
//#define BEAST_LOG_ASSERTIONS 1
#endif
/** Config: BEAST_CHECK_MEMORY_LEAKS
Enables a memory-leak check for certain objects when the app terminates.
See the LeakChecked class for more details about enabling leak checking for
@@ -220,17 +208,17 @@
#define RIPPLE_SINGLE_IO_SERVICE_THREAD 0
#endif
/** Config: RIPPLE_STRUCTURED_OVERLAY
Enables Structured Overlay support (unfinished)
/** Config: RIPPLE_STRUCTURED_OVERLAY_CLIENT
RIPPLE_STRUCTURED_OVERLAY_SERVER
Enables Structured Overlay support for the client or server roles.
This feature is currently in development:
https://ripplelabs.atlassian.net/browse/RIPD-157
*/
#ifndef RIPPLE_STRUCTURED_OVERLAY
#define RIPPLE_STRUCTURED_OVERLAY 0
#ifndef RIPPLE_STRUCTURED_OVERLAY_CLIENT
#define RIPPLE_STRUCTURED_OVERLAY_CLIENT 0
#endif
/** Config: RIPPLE_ASYNC_RPC_HANDLER
*/
#ifndef RIPPLE_ASYNC_RPC_HANDLER
#define RIPPLE_ASYNC_RPC_HANDLER 1
#ifndef RIPPLE_STRUCTURED_OVERLAY_SERVER
#define RIPPLE_STRUCTURED_OVERLAY_SERVER 1
#endif
#endif

View File

@@ -1,9 +1,9 @@
# src
Some of these directories come from entire outside repositories
brought in using git-subtree. This means that the source files are
inserted directly into the rippled repository. They can be edited
and committed just as if they were normal files.
Some of these directories come from entire outside repositories brought in
using git-subtree. This means that the source files are inserted directly
into the rippled repository. They can be edited and committed just as if they
were normal files.
However, if you create a commit that contains files both from a
subtree, and from the ripple source tree please use care when designing
@@ -99,7 +99,7 @@ ripple-fork
## protobuf
Ripple's fork of protobuf. We've changed some names in order to support the
unity-style of build (a single .cpp addded to the project, instead of
unity-style of build (a single .cpp added to the project, instead of
linking to a separately built static library).
Repository

View File

@@ -55,18 +55,6 @@
//#define BEAST_FORCE_DEBUG 1
#endif
/** Config: BEAST_LOG_ASSERTIONS
If this flag is enabled, the bassert and bassertfalse macros will always
use Logger::writeToLog() to write a message when an assertion happens.
Enabling it will also leave this turned on in release builds. When it's
disabled, however, the bassert and bassertfalse macros will not be compiled
in a release build.
@see bassert, bassertfalse, Logger
*/
#ifndef BEAST_LOG_ASSERTIONS
//#define BEAST_LOG_ASSERTIONS 1
#endif
/** Config: BEAST_CHECK_MEMORY_LEAKS
Enables a memory-leak check for certain objects when the app terminates.
See the LeakChecked class for more details about enabling leak checking for

View File

@@ -34,101 +34,6 @@
namespace beast {
// Some indispensable min/max functions
/** Returns the larger of two values. */
template <typename Type>
inline Type bmax (const Type a, const Type b)
{ return (a < b) ? b : a; }
/** Returns the larger of three values. */
template <typename Type>
inline Type bmax (const Type a, const Type b, const Type c)
{ return (a < b) ? ((b < c) ? c : b) : ((a < c) ? c : a); }
/** Returns the larger of four values. */
template <typename Type>
inline Type bmax (const Type a, const Type b, const Type c, const Type d)
{ return bmax (a, bmax (b, c, d)); }
/** Returns the smaller of two values. */
template <typename Type>
inline Type bmin (const Type a, const Type b)
{ return (b < a) ? b : a; }
/** Returns the smaller of three values. */
template <typename Type>
inline Type bmin (const Type a, const Type b, const Type c)
{ return (b < a) ? ((c < b) ? c : b) : ((c < a) ? c : a); }
/** Returns the smaller of four values. */
template <typename Type>
inline Type bmin (const Type a, const Type b, const Type c, const Type d)
{ return bmin (a, bmin (b, c, d)); }
/** Scans an array of values, returning the minimum value that it contains. */
template <typename Type>
const Type findMinimum (const Type* data, int numValues)
{
if (numValues <= 0)
return Type();
Type result (*data++);
while (--numValues > 0) // (> 0 rather than >= 0 because we've already taken the first sample)
{
const Type& v = *data++;
if (v < result) result = v;
}
return result;
}
/** Scans an array of values, returning the maximum value that it contains. */
template <typename Type>
const Type findMaximum (const Type* values, int numValues)
{
if (numValues <= 0)
return Type();
Type result (*values++);
while (--numValues > 0) // (> 0 rather than >= 0 because we've already taken the first sample)
{
const Type& v = *values++;
if (result < v) result = v;
}
return result;
}
/** Scans an array of values, returning the minimum and maximum values that it contains. */
template <typename Type>
void findMinAndMax (const Type* values, int numValues, Type& lowest, Type& highest)
{
if (numValues <= 0)
{
lowest = Type();
highest = Type();
}
else
{
Type mn (*values++);
Type mx (mn);
while (--numValues > 0) // (> 0 rather than >= 0 because we've already taken the first sample)
{
const Type& v = *values++;
if (mx < v) mx = v;
if (v < mn) mn = v;
}
lowest = mn;
highest = mx;
}
}
//==============================================================================
/** Constrains a value to keep it within a given range.
@@ -151,7 +56,8 @@ inline Type blimit (const Type lowerLimit,
const Type upperLimit,
const Type valueToConstrain) noexcept
{
bassert (lowerLimit <= upperLimit); // if these are in the wrong order, results are unpredictable..
// if these are in the wrong order, results are unpredictable.
bassert (lowerLimit <= upperLimit);
return (valueToConstrain < lowerLimit) ? lowerLimit
: ((upperLimit < valueToConstrain) ? upperLimit
@@ -177,24 +83,6 @@ inline bool isPositiveAndBelow (const int valueToTest, const int upperLimit) noe
return static_cast <unsigned int> (valueToTest) < static_cast <unsigned int> (upperLimit);
}
/** Returns true if a value is at least zero, and also less than or equal to a specified upper limit.
This is basically a quicker way to write:
@code valueToTest >= 0 && valueToTest <= upperLimit
@endcode
*/
template <typename Type>
inline bool isPositiveAndNotGreaterThan (Type valueToTest, Type upperLimit) noexcept
{
bassert (Type() <= upperLimit); // makes no sense to call this if the upper limit is itself below zero..
return Type() <= valueToTest && valueToTest <= upperLimit;
}
template <>
inline bool isPositiveAndNotGreaterThan (const int valueToTest, const int upperLimit) noexcept
{
bassert (upperLimit >= 0); // makes no sense to call this if the upper limit is itself below zero..
return static_cast <unsigned int> (valueToTest) <= static_cast <unsigned int> (upperLimit);
}
//==============================================================================
@@ -214,55 +102,6 @@ int numElementsInArray (Type (&array)[N])
return N;
}
/** 64-bit abs function. */
inline std::int64_t abs64 (const std::int64_t n) noexcept
{
return (n >= 0) ? n : -n;
}
//==============================================================================
#if BEAST_MSVC
#pragma optimize ("t", off)
#ifndef __INTEL_COMPILER
#pragma float_control (precise, on, push)
#endif
#endif
/** Fast floating-point-to-integer conversion.
This is faster than using the normal c++ cast to convert a float to an int, and
it will round the value to the nearest integer, rather than rounding it down
like the normal cast does.
Note that this routine gets its speed at the expense of some accuracy, and when
rounding values whose floating point component is exactly 0.5, odd numbers and
even numbers will be rounded up or down differently.
*/
template <typename FloatType>
inline int roundToInt (const FloatType value) noexcept
{
#ifdef __INTEL_COMPILER
#pragma float_control (precise, on, push)
#endif
union { int asInt[2]; double asDouble; } n;
n.asDouble = ((double) value) + 6755399441055744.0;
#if BEAST_BIG_ENDIAN
return n.asInt [1];
#else
return n.asInt [0];
#endif
}
#if BEAST_MSVC
#ifndef __INTEL_COMPILER
#pragma float_control (pop)
#endif
#pragma optimize ("", on) // resets optimisations to the project defaults
#endif
}
#endif
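
For reference, a hedged sketch of the standard-library equivalents that
callers could migrate to once these helpers are removed (the mapping is an
assumption, not something stated in this changeset):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdlib>

int example ()
{
    int a = 1, b = 2, c = 3, lo = 0, hi = 10, v = 42;

    int largest  = std::max ({a, b, c});             // ~ bmax (a, b, c)
    int smallest = std::min ({a, b, c});             // ~ bmin (a, b, c)
    int clamped  = std::min (std::max (v, lo), hi);  // ~ blimit (lo, hi, v)
    std::int64_t magnitude = std::llabs (-5LL);      // ~ abs64 (n)

    return largest + smallest + clamped + static_cast <int> (magnitude);
}
```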

View File

@@ -25,7 +25,6 @@
#define BEAST_ATOMIC_H_INCLUDED
#include <beast/Config.h>
#include <beast/StaticAssert.h>
#include <beast/utility/noexcept.h>
@@ -43,6 +42,10 @@ namespace beast {
template <typename Type>
class Atomic
{
// This class can only be used for types which are 32 or 64 bits in size.
static_assert (sizeof (Type) == 4 || sizeof (Type) == 8,
"Atomic arguments must be 32- or 64-bit long primitive types.");
public:
/** Creates a new value, initialised to zero. */
inline Atomic() noexcept
@@ -65,8 +68,6 @@ public:
/** Destructor. */
inline ~Atomic() noexcept
{
// This class can only be used for types which are 32 or 64 bits in size.
static_bassert (sizeof (Type) == 4 || sizeof (Type) == 8);
}
/** Atomically reads and returns the current value. */

View File

@@ -25,7 +25,6 @@
#define BEAST_BYTEORDER_H_INCLUDED
#include <beast/Config.h>
#include <beast/Uncopyable.h>
#include <cstdint>
@@ -35,7 +34,7 @@ namespace beast {
/** Contains static methods for converting the byte order between different
endiannesses.
*/
class ByteOrder : public Uncopyable
class ByteOrder
{
public:
//==============================================================================
@@ -105,6 +104,8 @@ public:
private:
ByteOrder();
ByteOrder(ByteOrder const&) = delete;
ByteOrder& operator= (ByteOrder const&) = delete;
};
//==============================================================================

View File

@@ -20,11 +20,8 @@
#ifndef BEAST_CRYPTO_H_INCLUDED
#define BEAST_CRYPTO_H_INCLUDED
#include <beast/crypto/BinaryEncoding.h>
#include <beast/crypto/MurmurHash.h>
#include <beast/crypto/Sha256.h>
#include <beast/crypto/UnsignedInteger.h>
#include <beast/crypto/UnsignedIntegerCalc.h>
#endif

View File

@@ -29,7 +29,6 @@
#include <stdexcept>
#include <beast/Memory.h>
#include <beast/Uncopyable.h>
// If the MSVC debug heap headers were included, disable
// the macros during the juce include since they conflict.
@@ -122,7 +121,7 @@ namespace HeapBlockHelper
@see Array, MemoryBlock
*/
template <class ElementType, bool throwOnFailure = false>
class HeapBlock : public Uncopyable
class HeapBlock
{
public:
//==============================================================================
@@ -136,12 +135,6 @@ public:
{
}
HeapBlock (HeapBlock& other)
{
data = other.data;
other.data = nullptr;
}
/** Creates a HeapBlock containing a number of elements.
The contents of the block are undefined, as it will have been created by a
@@ -169,6 +162,10 @@ public:
throwOnAllocationFailure();
}
HeapBlock(HeapBlock const&) = delete;
HeapBlock& operator= (HeapBlock const&) = delete;
/** Destructor.
This will free the data, if any has been allocated.
*/

View File

@@ -27,7 +27,6 @@
#include <cstring>
#include <beast/Config.h>
#include <beast/Uncopyable.h>
namespace beast {
@@ -78,12 +77,16 @@ Type* createCopyIfNotNull (const Type* pointer)
/** A handy C++ wrapper that creates and deletes an NSAutoreleasePool object using RAII.
You should use the BEAST_AUTORELEASEPOOL macro to create a local auto-release pool on the stack.
*/
class ScopedAutoReleasePool : public Uncopyable
class ScopedAutoReleasePool
{
public:
ScopedAutoReleasePool();
~ScopedAutoReleasePool();
ScopedAutoReleasePool(ScopedAutoReleasePool const&) = delete;
ScopedAutoReleasePool& operator= (ScopedAutoReleasePool const&) = delete;
private:
void* pool;
};

View File

@@ -1,45 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_STATICASSERT_H_INCLUDED
#define BEAST_STATICASSERT_H_INCLUDED
#ifndef DOXYGEN
namespace beast
{
template <bool b>
struct BeastStaticAssert;
template <>
struct BeastStaticAssert <true>
{
static void dummy() {}
};
}
#endif
/** A compile-time assertion macro.
If the expression parameter is false, the macro will cause a compile error.
(The actual error message that the compiler generates may be completely
bizarre and seem to have no relation to the place where you put the
static_assert though!)
*/
#define static_bassert(expression) beast::BeastStaticAssert<expression>::dummy();
#endif
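
The removal makes sense because C++11's built-in `static_assert` covers the
same ground; a minimal, illustrative before/after sketch:

```cpp
// Old (Beast macro, now removed):
//     static_bassert (sizeof (std::int64_t) == 8);
// New (C++11 keyword, no header required):
static_assert (sizeof (long long) >= 8, "long long must be at least 64 bits");
```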

View File

@@ -1,77 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_UNCOPYABLE_H_INCLUDED
#define BEAST_UNCOPYABLE_H_INCLUDED
namespace beast
{
// Ideas from boost
/** Prevent copy construction and assignment.
This is used to suppress warnings and prevent unsafe operations on
objects which cannot be passed by value. Ideas based on Boost.
For example, instead of
@code
class MyClass
{
public:
//...
private:
MyClass (const MyClass&);
MyClass& operator= (const MyClass&);
};
@endcode
..you can just write:
@code
class MyClass : public Uncopyable
{
public:
//...
};
@endcode
@note The derivation should be public or else child classes which
also derive from Uncopyable may not compile.
*/
class Uncopyable
{
protected:
Uncopyable () { }
~Uncopyable () { }
private:
Uncopyable (Uncopyable const&);
Uncopyable const& operator= (Uncopyable const&);
};
}
#endif
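
The surrounding diffs (ByteOrder, HeapBlock, ScopedAutoReleasePool) all apply
the same replacement idiom; a minimal sketch, using the placeholder class name
from the removed doc comment:

```cpp
// Instead of inheriting from Uncopyable, the copy operations are deleted
// directly on the class itself.
class MyClass
{
public:
    MyClass() = default;
    MyClass (MyClass const&) = delete;
    MyClass& operator= (MyClass const&) = delete;
};
```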

View File

@@ -22,11 +22,5 @@
#endif
#include <beast/asio/impl/IPAddressConversion.cpp>
#include <beast/asio/tests/wrap_handler.test.cpp>
#include <beast/asio/tests/bind_handler.test.cpp>
#include <beast/asio/tests/enable_wait_for_async.test.cpp>
#include <beast/asio/tests/shared_handler.test.cpp>
#include <beast/asio/abstract_socket.cpp> // TEMPORARY!

View File

@@ -1,217 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <beast/asio/abstract_socket.h>
#include <beast/asio/bind_handler.h>
namespace beast {
namespace asio {
#if ! BEAST_COMPILER_CHECKS_SOCKET_OVERRIDES
//------------------------------------------------------------------------------
//
// Socket
//
//------------------------------------------------------------------------------
void* abstract_socket::this_layer_ptr (char const*) const
{
pure_virtual_called ();
return nullptr;
}
//------------------------------------------------------------------------------
//
// native_handle
//
//------------------------------------------------------------------------------
bool abstract_socket::native_handle (char const*, void*)
{
pure_virtual_called ();
return false;
}
//------------------------------------------------------------------------------
//
// basic_io_object
//
//------------------------------------------------------------------------------
boost::asio::io_service& abstract_socket::get_io_service ()
{
pure_virtual_called ();
return *static_cast <boost::asio::io_service*>(nullptr);
}
//------------------------------------------------------------------------------
//
// basic_socket
//
//------------------------------------------------------------------------------
void*
abstract_socket::lowest_layer_ptr (char const*) const
{
pure_virtual_called ();
return nullptr;
}
auto
abstract_socket::cancel (boost::system::error_code& ec) -> error_code
{
return pure_virtual_error (ec);
}
auto
abstract_socket::shutdown (shutdown_type, boost::system::error_code& ec) -> error_code
{
return pure_virtual_error (ec);
}
auto
abstract_socket::close (boost::system::error_code& ec) -> error_code
{
return pure_virtual_error (ec);
}
//------------------------------------------------------------------------------
//
// basic_socket_acceptor
//
//------------------------------------------------------------------------------
auto
abstract_socket::accept (abstract_socket&, error_code& ec) -> error_code
{
return pure_virtual_error (ec);
}
void
abstract_socket::async_accept (abstract_socket&, error_handler handler)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error()));
}
//------------------------------------------------------------------------------
//
// basic_stream_socket
//
//------------------------------------------------------------------------------
std::size_t
abstract_socket::read_some (mutable_buffers, error_code& ec)
{
ec = pure_virtual_error ();
return 0;
}
std::size_t
abstract_socket::write_some (const_buffers, error_code& ec)
{
ec = pure_virtual_error ();
return 0;
}
void
abstract_socket::async_read_some (mutable_buffers, transfer_handler handler)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error(), 0));
}
void
abstract_socket::async_write_some (const_buffers, transfer_handler handler)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error(), 0));
}
//------------------------------------------------------------------------------
//
// ssl::stream
//
//------------------------------------------------------------------------------
void*
abstract_socket::next_layer_ptr (char const*) const
{
pure_virtual_called ();
return nullptr;
}
bool
abstract_socket::needs_handshake ()
{
return false;
}
void
abstract_socket::set_verify_mode (int)
{
pure_virtual_called ();
}
auto
abstract_socket::handshake (handshake_type, error_code& ec) -> error_code
{
return pure_virtual_error (ec);
}
void
abstract_socket::async_handshake (handshake_type, error_handler handler)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error()));
}
auto
abstract_socket::handshake (handshake_type, const_buffers, error_code& ec) ->
error_code
{
return pure_virtual_error (ec);
}
void
abstract_socket::async_handshake (handshake_type, const_buffers,
transfer_handler handler)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error(), 0));
}
auto
abstract_socket::shutdown (error_code& ec) -> error_code
{
return pure_virtual_error (ec);
}
void
abstract_socket::async_shutdown (error_handler handler)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error()));
}
#endif
}
}

View File

@@ -1,404 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_ASIO_ABSTRACT_SOCKET_H_INCLUDED
#define BEAST_ASIO_ABSTRACT_SOCKET_H_INCLUDED
#include <beast/asio/buffer_sequence.h>
#include <beast/asio/shared_handler.h>
#include <boost/asio/io_service.hpp>
#include <boost/asio/socket_base.hpp>
#include <boost/asio/ssl/stream_base.hpp>
// Checking overrides replaces unimplemented stubs with pure virtuals
#ifndef BEAST_COMPILER_CHECKS_SOCKET_OVERRIDES
# define BEAST_COMPILER_CHECKS_SOCKET_OVERRIDES 1
#endif
#if BEAST_COMPILER_CHECKS_SOCKET_OVERRIDES
# define BEAST_SOCKET_VIRTUAL = 0
#else
# define BEAST_SOCKET_VIRTUAL
#endif
namespace beast {
namespace asio {
/** A high level socket abstraction.
This combines the capabilities of multiple socket interfaces such
as listening, connecting, streaming, and handshaking. It brings
everything together into a single abstract interface.
When member functions are called and the underlying implementation does
not support the operation, a fatal error is generated.
*/
class abstract_socket
: public boost::asio::ssl::stream_base
, public boost::asio::socket_base
{
protected:
typedef boost::system::error_code error_code;
typedef asio::shared_handler <void (void)> post_handler;
typedef asio::shared_handler <void (error_code)> error_handler;
typedef asio::shared_handler <
void (error_code, std::size_t)> transfer_handler;
static
void
pure_virtual_called()
{
throw std::runtime_error ("pure virtual called");
}
static
error_code
pure_virtual_error ()
{
pure_virtual_called();
return boost::system::errc::make_error_code (
boost::system::errc::function_not_supported);
}
static
error_code
pure_virtual_error (error_code& ec)
{
return ec = pure_virtual_error();
}
static
void
throw_if (error_code const& ec)
{
if (ec)
throw boost::system::system_error (ec);
}
public:
virtual ~abstract_socket ()
{
}
//--------------------------------------------------------------------------
//
// abstract_socket
//
//--------------------------------------------------------------------------
/** Retrieve the underlying object.
@note If the type doesn't match, nullptr is returned or an
exception is thrown if trying to acquire a reference.
*/
/** @{ */
template <class Object>
Object& this_layer ()
{
Object* object (this->this_layer_ptr <Object> ());
if (object == nullptr)
throw std::bad_cast ();
return *object;
}
template <class Object>
Object const& this_layer () const
{
Object const* object (this->this_layer_ptr <Object> ());
if (object == nullptr)
throw std::bad_cast ();
return *object;
}
template <class Object>
Object* this_layer_ptr ()
{
return static_cast <Object*> (
this->this_layer_ptr (typeid (Object).name ()));
}
template <class Object>
Object const* this_layer_ptr () const
{
return static_cast <Object const*> (
this->this_layer_ptr (typeid (Object).name ()));
}
/** @} */
virtual void* this_layer_ptr (char const* type_name) const
BEAST_SOCKET_VIRTUAL;
//--------------------------------------------------------------------------
//
// native_handle
//
//--------------------------------------------------------------------------
/** Retrieve the native representation of the object.
Since we don't know the return type, and because almost every
asio implementation passes the result by value, you need to provide
a pointer to a default-constructed object of the matching type.
@note If the type doesn't match, an exception is thrown.
*/
template <typename Handle>
void native_handle (Handle* dest)
{
if (! native_handle (typeid (Handle).name (), dest))
throw std::bad_cast ();
}
virtual bool native_handle (char const* type_name, void* dest)
BEAST_SOCKET_VIRTUAL;
//--------------------------------------------------------------------------
//
// basic_io_object
//
//--------------------------------------------------------------------------
virtual boost::asio::io_service& get_io_service ()
BEAST_SOCKET_VIRTUAL;
//--------------------------------------------------------------------------
//
// basic_socket
//
//--------------------------------------------------------------------------
/** Retrieve the lowest layer object.
@note If the type doesn't match, nullptr is returned or an
exception is thrown if trying to acquire a reference.
*/
/** @{ */
template <class Object>
Object& lowest_layer ()
{
Object* object (this->lowest_layer_ptr <Object> ());
if (object == nullptr)
throw std::bad_cast ();
return *object;
}
template <class Object>
Object const& lowest_layer () const
{
Object const* object (this->lowest_layer_ptr <Object> ());
if (object == nullptr)
throw std::bad_cast ();
return *object;
}
template <class Object>
Object* lowest_layer_ptr ()
{
return static_cast <Object*> (
this->lowest_layer_ptr (typeid (Object).name ()));
}
template <class Object>
Object const* lowest_layer_ptr () const
{
return static_cast <Object const*> (
this->lowest_layer_ptr (typeid (Object).name ()));
}
/** @} */
virtual void* lowest_layer_ptr (char const* type_name) const
BEAST_SOCKET_VIRTUAL;
//--------------------------------------------------------------------------
void cancel ()
{
error_code ec;
cancel (ec);
throw_if (ec);
}
virtual error_code cancel (error_code& ec)
BEAST_SOCKET_VIRTUAL;
void shutdown (shutdown_type what)
{
error_code ec;
shutdown (what, ec);
throw_if (ec);
}
virtual error_code shutdown (shutdown_type what,
error_code& ec)
BEAST_SOCKET_VIRTUAL;
void close ()
{
error_code ec;
close (ec);
throw_if (ec);
}
virtual error_code close (error_code& ec)
BEAST_SOCKET_VIRTUAL;
//--------------------------------------------------------------------------
//
// basic_socket_acceptor
//
//--------------------------------------------------------------------------
virtual error_code accept (abstract_socket& peer, error_code& ec)
BEAST_SOCKET_VIRTUAL;
virtual void async_accept (abstract_socket& peer, error_handler handler)
BEAST_SOCKET_VIRTUAL;
//--------------------------------------------------------------------------
//
// basic_stream_socket
//
//--------------------------------------------------------------------------
virtual std::size_t read_some (mutable_buffers buffers, error_code& ec)
BEAST_SOCKET_VIRTUAL;
virtual std::size_t write_some (const_buffers buffers, error_code& ec)
BEAST_SOCKET_VIRTUAL;
virtual void async_read_some (mutable_buffers buffers,
transfer_handler handler)
BEAST_SOCKET_VIRTUAL;
virtual void async_write_some (const_buffers buffers,
transfer_handler handler)
BEAST_SOCKET_VIRTUAL;
//--------------------------------------------------------------------------
//
// ssl::stream
//
//--------------------------------------------------------------------------
/** Retrieve the next layer object.
@note If the type doesn't match, nullptr is returned or an
exception is thrown if trying to acquire a reference.
*/
/** @{ */
template <class Object>
Object& next_layer ()
{
Object* object (this->next_layer_ptr <Object> ());
if (object == nullptr)
throw std::bad_cast ();
return *object;
}
template <class Object>
Object const& next_layer () const
{
Object const* object (this->next_layer_ptr <Object> ());
if (object == nullptr)
throw std::bad_cast ();
return *object;
}
template <class Object>
Object* next_layer_ptr ()
{
return static_cast <Object*> (
this->next_layer_ptr (typeid (Object).name ()));
}
template <class Object>
Object const* next_layer_ptr () const
{
return static_cast <Object const*> (
this->next_layer_ptr (typeid (Object).name ()));
}
/** @} */
virtual void* next_layer_ptr (char const* type_name) const
BEAST_SOCKET_VIRTUAL;
/** Determines if the underlying stream requires a handshake.
If needs_handshake is true, it will be necessary to call handshake or
async_handshake after the connection is established. Furthermore it
will be necessary to call the shutdown member from the
HandshakeInterface to close the connection. Do not close the underlying
socket or else the closure will not be graceful. Only one side should
initiate the handshaking shutdown. The other side should observe it.
Which side does what is up to the user.
The default version returns false.
*/
virtual bool needs_handshake ()
BEAST_SOCKET_VIRTUAL;
virtual void set_verify_mode (int verify_mode)
BEAST_SOCKET_VIRTUAL;
void handshake (handshake_type type)
{
error_code ec;
handshake (type, ec);
throw_if (ec);
}
virtual error_code handshake (handshake_type type, error_code& ec)
BEAST_SOCKET_VIRTUAL;
virtual void async_handshake (handshake_type type, error_handler handler)
BEAST_SOCKET_VIRTUAL;
//--------------------------------------------------------------------------
virtual error_code handshake (handshake_type type,
const_buffers buffers, error_code& ec)
BEAST_SOCKET_VIRTUAL;
virtual void async_handshake (handshake_type type,
const_buffers buffers, transfer_handler handler)
BEAST_SOCKET_VIRTUAL;
//--------------------------------------------------------------------------
void shutdown ()
{
error_code ec;
shutdown (ec);
throw_if (ec);
}
virtual error_code shutdown (error_code& ec)
BEAST_SOCKET_VIRTUAL;
virtual void async_shutdown (error_handler handler)
BEAST_SOCKET_VIRTUAL;
};
}
}
#endif
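
As a hedged illustration of the type-erasure pattern this header provided, a
former caller might have looked like the following (the helper function and
socket option are examples, not taken from the rippled sources):

```cpp
#include <boost/asio/ip/tcp.hpp>

// Recover the concrete socket through the type-erased interface via the
// this_layer_ptr pattern documented above.
void disableNagle (beast::asio::abstract_socket& socket)
{
    if (auto* tcp = socket.this_layer_ptr <boost::asio::ip::tcp::socket> ())
        tcp->set_option (boost::asio::ip::tcp::no_delay (true));
}
```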

View File

@@ -1,126 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_ASIO_BUFFER_SEQUENCE_H_INCLUDED
#define BEAST_ASIO_BUFFER_SEQUENCE_H_INCLUDED
#include <boost/asio/buffer.hpp>
#include <beast/utility/noexcept.h>
#include <algorithm>
#include <iterator>
#include <beast/cxx14/type_traits.h> // <type_traits>
#include <vector>
namespace beast {
namespace asio {
template <class Buffer>
class buffer_sequence
{
private:
typedef std::vector <Buffer> sequence_type;
public:
typedef Buffer value_type;
typedef typename sequence_type::const_iterator const_iterator;
private:
sequence_type m_buffers;
template <class FwdIter>
void assign (FwdIter first, FwdIter last)
{
m_buffers.clear();
m_buffers.reserve (std::distance (first, last));
for (;first != last; ++first)
m_buffers.push_back (*first);
}
public:
buffer_sequence ()
{
}
template <
class BufferSequence,
class = std::enable_if_t <std::is_constructible <
Buffer, typename BufferSequence::value_type>::value>
>
buffer_sequence (BufferSequence const& s)
{
assign (std::begin (s), std::end (s));
}
template <
class FwdIter,
class = std::enable_if_t <std::is_constructible <
Buffer, typename std::iterator_traits <
FwdIter>::value_type>::value>
>
buffer_sequence (FwdIter first, FwdIter last)
{
assign (first, last);
}
template <class BufferSequence>
std::enable_if_t <std::is_constructible <
Buffer, typename BufferSequence::value_type>::value,
buffer_sequence&
>
operator= (BufferSequence const& s)
{
return assign (s);
}
const_iterator
begin () const noexcept
{
return m_buffers.begin ();
}
const_iterator
end () const noexcept
{
return m_buffers.end ();
}
#if 0
template <class ConstBufferSequence>
void
assign (ConstBufferSequence const& buffers)
{
auto const n (std::distance (
std::begin (buffers), std::end (buffers)));
for (int i = 0, auto iter (std::begin (buffers));
iter != std::end (buffers); ++iter, ++i)
m_buffers[i] = Buffer (boost::asio::buffer_cast <void*> (
*iter), boost::asio::buffer_size (*iter));
}
#endif
};
typedef buffer_sequence <boost::asio::const_buffer> const_buffers;
typedef buffer_sequence <boost::asio::mutable_buffer> mutable_buffers;
}
}
#endif
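
A brief sketch of how the `const_buffers` alias could be populated from an
ordinary asio buffer (illustrative only, not from the rippled sources):

```cpp
#include <boost/asio/buffer.hpp>
#include <string>

// Collapse an ordinary asio buffer into the concrete const_buffers type
// consumed by abstract_socket.
std::string payload = "ping";
beast::asio::const_buffers buffers (boost::asio::buffer (payload));
```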

View File

@@ -1,265 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_ASIO_ENABLE_WAIT_FOR_ASYNC_H_INCLUDED
#define BEAST_ASIO_ENABLE_WAIT_FOR_ASYNC_H_INCLUDED
#include <beast/asio/wrap_handler.h>
#include <beast/utility/is_call_possible.h>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_cont_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <beast/cxx14/type_traits.h> // <type_traits>
namespace beast {
namespace asio {
namespace detail {
template <class Owner, class Handler>
class ref_counted_wrapped_handler
{
private:
static_assert (std::is_same <std::decay_t <Owner>, Owner>::value,
"Owner cannot be a const or reference type");
Handler m_handler;
std::reference_wrapper <Owner> m_owner;
bool m_continuation;
public:
ref_counted_wrapped_handler (Owner& owner,
Handler&& handler, bool continuation)
: m_handler (std::move (handler))
, m_owner (owner)
, m_continuation (continuation ? true :
boost_asio_handler_cont_helpers::is_continuation (m_handler))
{
m_owner.get().increment();
}
ref_counted_wrapped_handler (Owner& owner,
Handler const& handler, bool continuation)
: m_handler (handler)
, m_owner (owner)
, m_continuation (continuation ? true :
boost_asio_handler_cont_helpers::is_continuation (m_handler))
{
m_owner.get().increment();
}
~ref_counted_wrapped_handler ()
{
m_owner.get().decrement();
}
ref_counted_wrapped_handler (ref_counted_wrapped_handler const& other)
: m_handler (other.m_handler)
, m_owner (other.m_owner)
, m_continuation (other.m_continuation)
{
m_owner.get().increment();
}
ref_counted_wrapped_handler (ref_counted_wrapped_handler&& other)
: m_handler (std::move (other.m_handler))
, m_owner (other.m_owner)
, m_continuation (other.m_continuation)
{
m_owner.get().increment();
}
ref_counted_wrapped_handler& operator= (
ref_counted_wrapped_handler const&) = delete;
template <class... Args>
void
operator() (Args&&... args)
{
m_handler (std::forward <Args> (args)...);
}
template <class... Args>
void
operator() (Args&&... args) const
{
m_handler (std::forward <Args> (args)...);
}
template <class Function>
friend
void
asio_handler_invoke (Function& f,
ref_counted_wrapped_handler* h)
{
boost_asio_handler_invoke_helpers::
invoke (f, h->m_handler);
}
template <class Function>
friend
void
asio_handler_invoke (Function const& f,
ref_counted_wrapped_handler* h)
{
boost_asio_handler_invoke_helpers::
invoke (f, h->m_handler);
}
friend
void*
asio_handler_allocate (std::size_t size,
ref_counted_wrapped_handler* h)
{
return boost_asio_handler_alloc_helpers::
allocate (size, h->m_handler);
}
friend
void
asio_handler_deallocate (void* p, std::size_t size,
ref_counted_wrapped_handler* h)
{
boost_asio_handler_alloc_helpers::
deallocate (p, size, h->m_handler);
}
friend
bool
asio_handler_is_continuation (ref_counted_wrapped_handler* h)
{
return h->m_continuation;
}
};
}
//------------------------------------------------------------------------------
/** Facilitates blocking until no completion handlers are remaining.
If Derived has this member function:
@code
void on_wait_for_async (void)
@endcode
Then it will be called every time the number of pending completion
handlers transitions to zero from a non-zero value. The call is made
while holding the internal mutex.
*/
template <class Derived>
class enable_wait_for_async
{
private:
BEAST_DEFINE_IS_CALL_POSSIBLE(
has_on_wait_for_async,on_wait_for_async);
void increment()
{
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
++m_count;
}
void notify (std::true_type)
{
static_cast <Derived*> (this)->on_wait_for_async();
}
void notify (std::false_type)
{
}
void decrement()
{
std::lock_guard <decltype(m_mutex)> lock (m_mutex);
--m_count;
if (m_count == 0)
{
m_cond.notify_all();
notify (std::integral_constant <bool,
has_on_wait_for_async<Derived, void(void)>::value>());
}
}
template <class Owner, class Handler>
friend class detail::ref_counted_wrapped_handler;
std::mutex m_mutex;
std::condition_variable m_cond;
std::size_t m_count;
public:
/** Blocks if there are any pending completion handlers. */
void
wait_for_async()
{
std::unique_lock <decltype (m_mutex)> lock (m_mutex);
while (m_count != 0)
m_cond.wait (lock);
}
protected:
enable_wait_for_async()
: m_count (0)
{
}
~enable_wait_for_async()
{
assert (m_count == 0);
}
/** Wraps the specified handler so it can be counted. */
/** @{ */
template <class Handler>
detail::ref_counted_wrapped_handler <
enable_wait_for_async,
std::remove_reference_t <Handler>
>
wrap_with_counter (Handler&& handler, bool continuation = false)
{
return detail::ref_counted_wrapped_handler <enable_wait_for_async,
std::remove_reference_t <Handler>> (*this,
std::forward <Handler> (handler), continuation);
}
template <class Handler>
detail::ref_counted_wrapped_handler <
enable_wait_for_async,
std::remove_reference_t <Handler>
>
wrap_with_counter (continuation_t, Handler&& handler)
{
return detail::ref_counted_wrapped_handler <enable_wait_for_async,
std::remove_reference_t <Handler>> (*this,
std::forward <Handler> (handler), true);
}
/** @} */
};
}
}
#endif
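
A hypothetical owner class, sketched from the doc comment above (the class
and member names are placeholders, not actual rippled callers): each wrapped
handler is counted while pending, and `stop()` blocks until the count returns
to zero.

```cpp
class connection : public beast::asio::enable_wait_for_async <connection>
{
public:
    template <class Socket, class Buffers>
    void start_read (Socket& socket, Buffers const& buffers)
    {
        // The wrapper increments the pending-handler count until it runs.
        socket.async_read_some (buffers, wrap_with_counter (
            [] (boost::system::error_code, std::size_t) { /* handle read */ }));
    }

    void stop ()
    {
        wait_for_async();   // returns once no completion handlers remain
    }
};
```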

View File

@@ -1,426 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_ASIO_MEMORY_BUFFER_H_INCLUDED
#define BEAST_ASIO_MEMORY_BUFFER_H_INCLUDED
#include <beast/utility/empty_base_optimization.h>
#include <boost/asio/buffer.hpp>
#include <beast/utility/noexcept.h>
#include <cstddef>
#include <memory>
#include <type_traits>
namespace beast {
namespace asio {
template <
class T,
class Alloc = std::allocator <T>
>
class memory_buffer
: private empty_base_optimization <Alloc>
{
private:
static_assert (std::is_same <char, T>::value ||
std::is_same <unsigned char, T>::value,
"memory_buffer only works with char and unsigned char");
typedef empty_base_optimization <Alloc> Base;
using AllocTraits = std::allocator_traits <Alloc>;
T* m_base;
std::size_t m_size;
public:
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
typedef T& reference;
typedef T const& const_reference;
typedef T* pointer;
typedef T const* const_pointer;
typedef Alloc allocator_type;
typedef T* iterator;
typedef T const* const_iterator;
typedef std::reverse_iterator <iterator> reverse_iterator;
typedef std::reverse_iterator <const_iterator> const_reverse_iterator;
memory_buffer ()
: m_base (nullptr)
, m_size (0)
{
}
memory_buffer (memory_buffer&& other)
: Base (std::move (other))
, m_base (other.m_base)
, m_size (other.m_size)
{
other.m_base = nullptr;
other.m_size = 0;
}
explicit memory_buffer (size_type n)
: m_base (AllocTraits::allocate (Base::member(), n))
, m_size (n)
{
}
explicit memory_buffer (Alloc const& alloc)
: Base (alloc)
, m_base (nullptr)
, m_size (0)
{
}
memory_buffer (size_type n, Alloc const& alloc)
: Base (alloc)
, m_base (AllocTraits::allocate (Base::member(), n))
, m_size (n)
{
}
~memory_buffer()
{
if (m_base != nullptr)
AllocTraits::deallocate (Base::member(), m_base, m_size);
}
memory_buffer& operator= (memory_buffer const&) = delete;
allocator_type
get_allocator() const
{
return Base::member;
}
//
// asio support
//
boost::asio::mutable_buffer
buffer()
{
return boost::asio::mutable_buffer (
data(), bytes());
}
boost::asio::const_buffer
buffer() const
{
return boost::asio::const_buffer (
data(), bytes());
}
boost::asio::mutable_buffers_1
buffers()
{
return boost::asio::mutable_buffers_1 (
data(), bytes());
}
boost::asio::const_buffers_1
buffers() const
{
return boost::asio::const_buffers_1 (
data(), bytes());
}
operator boost::asio::mutable_buffer()
{
return buffer();
}
operator boost::asio::const_buffer() const
{
return buffer();
}
operator boost::asio::mutable_buffers_1()
{
return buffers();
}
operator boost::asio::const_buffers_1() const
{
return buffers();
}
//
// Element access
//
reference
at (size_type pos)
{
if (! (pos < size()))
throw std::out_of_range ("bad array index");
return m_base [pos];
}
const_reference
at (size_type pos) const
{
if (! (pos < size()))
throw std::out_of_range ("bad array index");
return m_base [pos];
}
reference
operator[] (size_type pos) noexcept
{
return m_base [pos];
}
const_reference
operator[] (size_type pos) const noexcept
{
return m_base [pos];
}
reference
back() noexcept
{
return m_base [m_size - 1];
}
const_reference
back() const noexcept
{
return m_base [m_size - 1];
}
reference
front() noexcept
{
return *m_base;
}
const_reference
front() const noexcept
{
return *m_base;
}
pointer
data() noexcept
{
return m_base;
}
const_pointer
data() const noexcept
{
return m_base;
}
//
// Iterators
//
iterator
begin() noexcept
{
return m_base;
}
const_iterator
begin() const noexcept
{
return m_base;
}
const_iterator
cbegin() const noexcept
{
return m_base;
}
iterator
end() noexcept
{
return m_base + m_size;
}
const_iterator
end() const noexcept
{
return m_base + m_size;
}
const_iterator
cend() const noexcept
{
return m_base + m_size;
}
reverse_iterator
rbegin() noexcept
{
return reverse_iterator (end());
}
const_reverse_iterator
rbegin() const noexcept
{
return const_reverse_iterator (cend());
}
const_reverse_iterator
crbegin() const noexcept
{
return const_reverse_iterator (cend());
}
reverse_iterator
rend() noexcept
{
return reverse_iterator (begin());
}
const_reverse_iterator
rend() const noexcept
{
return const_reverse_iterator (cbegin());
}
const_reverse_iterator
crend() const noexcept
{
return const_reverse_iterator (cbegin());
}
//
// Capacity
//
bool
empty() const noexcept
{
return m_size == 0;
}
size_type
size() const noexcept
{
return m_size;
}
size_type
max_size() const noexcept
{
return size();
}
size_type
capacity() const noexcept
{
return size();
}
size_type bytes() const
{
return m_size * sizeof(T);
}
//
// Modifiers
//
template <class U, class A>
friend
void
swap (memory_buffer <U, A>& lhs,
memory_buffer <U, A>& rhs) noexcept;
};
//------------------------------------------------------------------------------
template <class T, class Alloc>
void
swap (memory_buffer <T, Alloc>& lhs,
memory_buffer <T, Alloc>& rhs) noexcept
{
std::swap (lhs.m_base, rhs.m_base);
std::swap (lhs.m_size, rhs.m_size);
}
template <class T, class A1, class A2>
inline
bool
operator== (memory_buffer <T, A1> const& lhs,
memory_buffer <T, A2> const& rhs)
{
return std::equal (lhs.cbegin(), lhs.cend(),
rhs.cbegin(), rhs.cend());
}
template <class T, class A1, class A2>
inline
bool
operator!= (memory_buffer <T, A1> const& lhs,
memory_buffer <T, A2> const& rhs)
{
return ! (lhs == rhs);
}
template <class T, class A1, class A2>
inline
bool
operator< (memory_buffer <T, A1> const& lhs,
memory_buffer <T, A2> const& rhs)
{
return std::lexicographical_compare (
lhs.cbegin(), lhs.cend(), rhs.cbegin(), rhs.cend());
}
template <class T, class A1, class A2>
inline
bool
operator>= (memory_buffer <T, A1> const& lhs,
memory_buffer <T, A2> const& rhs)
{
return ! (lhs < rhs);
}
template <class T, class A1, class A2>
inline
bool
operator> (memory_buffer <T, A1> const& lhs,
memory_buffer <T, A2> const& rhs)
{
return rhs < lhs;
}
template <class T, class A1, class A2>
inline
bool
operator<= (memory_buffer <T, A1> const& lhs,
memory_buffer <T, A2> const& rhs)
{
return ! (rhs < lhs);
}
}
}
#endif
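
A small sketch of intended usage, inferred from the asio conversion operators
above (the commented read call is illustrative):

```cpp
// Allocate a fixed block and expose it to asio as a mutable buffer sequence.
beast::asio::memory_buffer <char> storage (1024);
boost::asio::mutable_buffers_1 readable = storage.buffers();
// socket.async_read_some (readable, handler);   // typical consumer
```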

View File

@@ -1,475 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_ASIO_SHARED_HANDLER_H_INCLUDED
#define BEAST_ASIO_SHARED_HANDLER_H_INCLUDED
#include <beast/Config.h>
#include <beast/utility/is_call_possible.h>
#include <boost/utility/base_from_member.hpp>
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_cont_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <beast/utility/noexcept.h>
#include <functional>
#include <memory>
#include <beast/cxx14/type_traits.h> // <type_traits>
#ifndef BEAST_ASIO_NO_ALLOCATE_SHARED
#define BEAST_ASIO_NO_ALLOCATE_SHARED 0
#endif
#ifndef BEAST_ASIO_NO_HANDLER_RESULT_OF
#define BEAST_ASIO_NO_HANDLER_RESULT_OF 1
#endif
namespace beast {
namespace asio {
class shared_handler_wrapper_base
{
public:
virtual ~shared_handler_wrapper_base()
{
}
virtual void invoke (std::function <void (void)> f) = 0;
virtual void* allocate (std::size_t size) = 0;
virtual void deallocate (void* p, std::size_t size) = 0;
virtual bool is_continuation () = 0;
};
//------------------------------------------------------------------------------
template <class Signature>
class shared_handler_wrapper_func
: public shared_handler_wrapper_base
{
private:
std::function <Signature> m_func;
public:
template <class Handler>
explicit shared_handler_wrapper_func (Handler&& handler)
: m_func (std::ref (std::forward <Handler> (handler)))
{
}
template <class... Args>
#if BEAST_ASIO_NO_HANDLER_RESULT_OF
void
#else
std::result_of_t <std::function <Signature> (Args...)>
#endif
operator() (Args&&... args) const
{
return m_func (std::forward <Args> (args)...);
}
};
//------------------------------------------------------------------------------
namespace detail {
#ifdef _MSC_VER
#pragma warning (push)
#pragma warning (disable: 4512) // assignment operator could not be generated
#endif
template <class Signature, class Handler>
class shared_handler_wrapper
: private boost::base_from_member <Handler>
, public shared_handler_wrapper_func <Signature>
{
private:
typedef boost::base_from_member <Handler> Base;
BEAST_DEFINE_IS_CALL_POSSIBLE(has_is_continuation, is_continuation);
public:
shared_handler_wrapper (Handler&& handler)
: boost::base_from_member <Handler> (std::move (handler))
, shared_handler_wrapper_func <Signature> (Base::member)
{
}
shared_handler_wrapper (Handler const& handler)
: boost::base_from_member <Handler> (handler)
, shared_handler_wrapper_func <Signature> (Base::member)
{
}
private:
void
invoke (std::function <void (void)> f) override
{
return boost_asio_handler_invoke_helpers::
invoke (f, Base::member);
}
void*
allocate (std::size_t size) override
{
return boost_asio_handler_alloc_helpers::
allocate (size, Base::member);
}
void
deallocate (void* p, std::size_t size) override
{
boost_asio_handler_alloc_helpers::
deallocate (p, size, Base::member);
}
bool
is_continuation () override
{
return is_continuation (std::integral_constant <bool,
has_is_continuation <Handler, bool(void)>::value>());
}
bool
is_continuation (std::true_type)
{
return Base::member.is_continuation();
}
bool
is_continuation (std::false_type)
{
return boost_asio_handler_cont_helpers::
is_continuation (Base::member);
}
};
#ifdef _MSC_VER
#pragma warning (pop)
#endif
template <class T>
struct is_shared_handler : public std::false_type
{
};
//------------------------------------------------------------------------------
template <class T, class Handler>
class handler_allocator
{
private:
// We want a partial template specialization as a friend
// but that isn't allowed so we friend all versions. This
// should produce a compile error if Handler is not constructible
// from H.
//
template <class U, class H>
friend class handler_allocator;
Handler m_handler;
public:
typedef T value_type;
typedef T* pointer;
template <class U>
struct rebind
{
public:
typedef handler_allocator <U, Handler> other;
};
handler_allocator() = delete;
handler_allocator (Handler const& handler)
: m_handler (handler)
{
}
template <class U>
handler_allocator (
handler_allocator <U, Handler> const& other)
: m_handler (other.m_handler)
{
}
handler_allocator&
operator= (handler_allocator const&) = delete;
pointer
allocate (std::ptrdiff_t n)
{
auto const size (n * sizeof (T));
return static_cast <pointer> (
boost_asio_handler_alloc_helpers::allocate (
size, m_handler));
}
void
deallocate (pointer p, std::ptrdiff_t n)
{
auto const size (n * sizeof (T));
boost_asio_handler_alloc_helpers::deallocate (
p, size, m_handler);
}
// Work-around for MSVC not using allocator_traits
// in the implementation of shared_ptr
//
#ifdef _MSC_VER
void
destroy (T* t)
{
t->~T();
}
#endif
friend
bool
operator== (handler_allocator const& lhs, handler_allocator const& rhs)
{
return true;
}
friend
bool
operator!= (handler_allocator const& lhs, handler_allocator const& rhs)
{
return ! (lhs == rhs);
}
};
}
//------------------------------------------------------------------------------
/** Handler shared reference that provides io_service execution guarantees. */
template <class Signature>
class shared_handler
{
private:
template <class T>
friend class shared_handler_allocator;
typedef shared_handler_wrapper_func <
Signature> wrapper_type;
typedef std::shared_ptr <wrapper_type> ptr_type;
ptr_type m_ptr;
public:
shared_handler()
{
}
template <
class DeducedHandler,
class = std::enable_if_t <
! detail::is_shared_handler <
std::decay_t <DeducedHandler>>::value &&
std::is_constructible <std::function <Signature>,
std::decay_t <DeducedHandler>>::value
>
>
shared_handler (DeducedHandler&& handler)
{
typedef std::remove_reference_t <DeducedHandler> Handler;
#if BEAST_ASIO_NO_ALLOCATE_SHARED
m_ptr = std::make_shared <detail::shared_handler_wrapper <
Signature, Handler>> (std::forward <DeducedHandler> (handler));
#else
m_ptr = std::allocate_shared <detail::shared_handler_wrapper <
Signature, Handler>> (detail::handler_allocator <char, Handler> (
handler), std::forward <DeducedHandler> (handler));
#endif
}
shared_handler (shared_handler&& other)
: m_ptr (std::move (other.m_ptr))
{
}
shared_handler (shared_handler const& other)
: m_ptr (other.m_ptr)
{
}
shared_handler&
operator= (std::nullptr_t)
{
m_ptr = nullptr;
return *this;
}
shared_handler&
operator= (shared_handler const& rhs)
{
m_ptr = rhs.m_ptr;
return *this;
}
shared_handler&
operator= (shared_handler&& rhs)
{
m_ptr = std::move (rhs.m_ptr);
return *this;
}
explicit
operator bool() const noexcept
{
return m_ptr.operator bool();
}
void
reset()
{
m_ptr.reset();
}
template <class... Args>
#if BEAST_ASIO_NO_HANDLER_RESULT_OF
void
#else
std::result_of_t <std::function <Signature> (Args...)>
#endif
operator() (Args&&... args) const
{
return (*m_ptr)(std::forward <Args> (args)...);
}
template <class Function>
friend
void
asio_handler_invoke (Function&& f, shared_handler* h)
{
return h->m_ptr->invoke (f);
}
friend
void*
asio_handler_allocate (
std::size_t size, shared_handler* h)
{
return h->m_ptr->allocate (size);
}
friend
void
asio_handler_deallocate (
void* p, std::size_t size, shared_handler* h)
{
return h->m_ptr->deallocate (p, size);
}
friend
bool
asio_handler_is_continuation (
shared_handler* h)
{
return h->m_ptr->is_continuation ();
}
};
//------------------------------------------------------------------------------
namespace detail {
template <
class Signature
>
struct is_shared_handler <
shared_handler <Signature>
> : public std::true_type
{
};
}
//------------------------------------------------------------------------------
template <class T>
class shared_handler_allocator
{
private:
template <class U>
friend class shared_handler_allocator;
std::shared_ptr <shared_handler_wrapper_base> m_ptr;
public:
typedef T value_type;
typedef T* pointer;
shared_handler_allocator() = delete;
template <class Signature>
shared_handler_allocator (
shared_handler <Signature> const& handler)
: m_ptr (handler.m_ptr)
{
}
template <class U>
shared_handler_allocator (
shared_handler_allocator <U> const& other)
: m_ptr (other.m_ptr)
{
}
pointer
allocate (std::ptrdiff_t n)
{
auto const size (n * sizeof (T));
return static_cast <pointer> (
m_ptr->allocate (size));
}
void
deallocate (pointer p, std::ptrdiff_t n)
{
auto const size (n * sizeof (T));
m_ptr->deallocate (p, size);
}
friend
bool
operator== (shared_handler_allocator const& lhs,
shared_handler_allocator const& rhs)
{
return lhs.m_ptr == rhs.m_ptr;
}
friend
bool
operator!= (shared_handler_allocator const& lhs,
shared_handler_allocator const& rhs)
{
return ! (lhs == rhs);
}
};
}
}
#endif
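
A hedged sketch of the pattern this class supported: type-erasing a completion
handler while preserving its allocation, invocation, and continuation hooks,
per the class comment above (the free functions are placeholders, not rippled
code).

```cpp
beast::asio::shared_handler <void (boost::system::error_code)> saved;

template <class Handler>
void defer (Handler&& handler)
{
    saved = std::forward <Handler> (handler);    // wraps and shares the handler
}

void complete ()
{
    if (saved)
        saved (boost::system::error_code{});     // invokes the original handler
}
```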

View File

@@ -1,826 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_ASIO_SOCKET_WRAPPER_H_INCLUDED
#define BEAST_ASIO_SOCKET_WRAPPER_H_INCLUDED
#include <beast/asio/abstract_socket.h>
#include <beast/asio/bind_handler.h>
#include <beast/utility/noexcept.h>
namespace beast {
namespace asio {
/** Wraps a reference to any object and exports all available interfaces.
If the object does not support an interface, calling those
member functions will behave as if a pure virtual was called.
Note that only a reference to the underlying is stored. Management
of the lifetime of the object is controlled by the caller.
Examples of the type of Object:
asio::ip::tcp::socket
asio::ip::tcp::socket&
asio::ssl::stream <asio::ip::tcp::socket>
asio::ssl::stream <asio::ip::tcp::socket&>
explain arg must be an io_context
explain socket_wrapper will create and take ownership of the tcp::socket
explain this_layer_type will be tcp::socket
explain next_layer () returns a asio::ip::tcp::socket&
explain lowest_layer () returns a asio::ip::tcp::socket&
asio::ssl::stream <asio::buffered_stream <asio::ip::tcp::socket> > >
This makes my head explode
*/
template <typename Object>
class socket_wrapper : public abstract_socket
{
private:
Object m_object;
public:
template <class... Args>
explicit socket_wrapper (Args&&... args)
: m_object (std::forward <Args> (args)...)
{
}
socket_wrapper (socket_wrapper const&) = delete;
socket_wrapper& operator= (socket_wrapper const&) = delete;
//--------------------------------------------------------------------------
//
// socket_wrapper
//
//--------------------------------------------------------------------------
/** The type of the object being wrapped. */
typedef typename boost::remove_reference <Object>::type this_layer_type;
/** Get a reference to this layer. */
this_layer_type& this_layer () noexcept
{
return m_object;
}
/** Get a const reference to this layer. */
this_layer_type const& this_layer () const noexcept
{
return m_object;
}
//--------------------------------------------------------------------------
//
// abstract_socket
//
//--------------------------------------------------------------------------
void* this_layer_ptr (char const* type_name) const override
{
char const* const name (typeid (this_layer_type).name ());
if (strcmp (name, type_name) == 0)
return const_cast <void*> (static_cast <void const*> (&m_object));
return nullptr;
}
private:
BEAST_DEFINE_IS_CALL_POSSIBLE(has_get_io_service, get_io_service);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_lowest_layer, lowest_layer);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_cancel, cancel);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_shutdown, shutdown);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_close, close);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_accept, accept);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_async_accept, async_accept);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_read_some, read_some);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_write_some, write_some);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_async_read_some, async_read_some);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_async_write_some, async_write_some);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_set_verify_mode, set_verify_mode);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_handshake, handshake);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_async_handshake, async_handshake);
BEAST_DEFINE_IS_CALL_POSSIBLE(has_async_shutdown, async_shutdown);
//--------------------------------------------------------------------------
//
// Implementation
//
//--------------------------------------------------------------------------
template <class Cond>
struct Enabled : public std::integral_constant <bool, Cond::value>
{
};
//--------------------------------------------------------------------------
//
// native_handle
//
//--------------------------------------------------------------------------
#if 0
// This is a potential work-around for the problem with
// the has_type_native_handle_type template, but requires
// Boost 1.54 or later.
//
// This include will be needed:
//
// boost/tti/has_type.hpp
//
//
BOOST_TTI_HAS_TYPE(native_handle_type)
#else
template <class T>
struct has_type_native_handle_type
{
typedef char yes;
typedef struct {char dummy[2];} no;
template <class C> static yes f(typename C::native_handle_type*);
template <class C> static no f(...);
#ifdef _MSC_VER
static bool const value = sizeof(f<T>(0)) == 1;
#else
// This line fails to compile under Visual Studio 2012
static bool const value = sizeof(
has_type_native_handle_type<T>::f<T>(0)) == 1;
#endif
};
#endif
template <typename T,
bool Exists = has_type_native_handle_type <T>::value
>
struct extract_native_handle_type
{
typedef typename T::native_handle_type type;
};
template <typename T>
struct extract_native_handle_type <T, false>
{
typedef void type;
};
// This will be void if native_handle_type doesn't exist in Object
typedef typename extract_native_handle_type <
this_layer_type>::type native_handle_type;
//--------------------------------------------------------------------------
bool native_handle (char const* type_name, void* dest) override
{
return native_handle (type_name, dest,
Enabled <has_type_native_handle_type <this_layer_type> > ());
}
bool native_handle (char const* type_name, void* dest,
std::true_type)
{
char const* const name (typeid (
typename this_layer_type::native_handle_type).name ());
if (strcmp (name, type_name) == 0)
{
native_handle_type* const p (reinterpret_cast <
native_handle_type*> (dest));
*p = m_object.native_handle ();
return true;
}
return false;
}
bool native_handle (char const*, void*,
std::false_type)
{
pure_virtual_called();
return false;
}
//--------------------------------------------------------------------------
//
// basic_io_object
//
//--------------------------------------------------------------------------
boost::asio::io_service& get_io_service () override
{
return get_io_service (
Enabled <has_get_io_service <this_layer_type,
boost::asio::io_service&()> > ());
}
boost::asio::io_service& get_io_service (
std::true_type)
{
return m_object.get_io_service ();
}
boost::asio::io_service& get_io_service (
std::false_type)
{
pure_virtual_called();
return *static_cast <boost::asio::io_service*>(nullptr);
}
//--------------------------------------------------------------------------
//
// basic_socket
//
//--------------------------------------------------------------------------
/*
To forward the lowest_layer_type type, we need to make sure it
exists in Object. This is a little more tricky than just figuring
out if Object has a particular member function.
The problem is boost::asio::basic_socket_acceptor, which doesn't
have lowest_layer () or lowest_layer_type ().
*/
template <class T>
struct has_type_lowest_layer_type
{
typedef char yes;
typedef struct {char dummy[2];} no;
template <class C> static yes f(typename C::lowest_layer_type*);
template <class C> static no f(...);
#ifdef _MSC_VER
static bool const value = sizeof(f<T>(0)) == 1;
#else
// This line fails to compile under Visual Studio 2012
static bool const value = sizeof(has_type_lowest_layer_type<T>::f<T>(0)) == 1;
#endif
};
template <typename T, bool Exists = has_type_lowest_layer_type <T>::value >
struct extract_lowest_layer_type
{
typedef typename T::lowest_layer_type type;
};
template <typename T>
struct extract_lowest_layer_type <T, false>
{
typedef void type;
};
// This will be void if lowest_layer_type doesn't exist in Object
typedef typename extract_lowest_layer_type <this_layer_type>::type lowest_layer_type;
//--------------------------------------------------------------------------
void* lowest_layer_ptr (char const* type_name) const override
{
return lowest_layer_ptr (type_name,
Enabled <has_type_lowest_layer_type <this_layer_type> > ());
}
void* lowest_layer_ptr (char const* type_name,
std::true_type) const
{
char const* const name (typeid (typename this_layer_type::lowest_layer_type).name ());
if (strcmp (name, type_name) == 0)
return const_cast <void*> (static_cast <void const*> (&m_object.lowest_layer ()));
return nullptr;
}
void* lowest_layer_ptr (char const*,
std::false_type) const
{
pure_virtual_called();
return nullptr;
}
//--------------------------------------------------------------------------
error_code cancel (error_code& ec) override
{
return cancel (ec,
Enabled <has_cancel <this_layer_type,
error_code (error_code&)> > ());
}
error_code cancel (error_code& ec,
std::true_type)
{
return m_object.cancel (ec);
}
error_code cancel (error_code& ec,
std::false_type)
{
return pure_virtual_error (ec);
}
//--------------------------------------------------------------------------
error_code shutdown (shutdown_type what, error_code& ec) override
{
return shutdown (what, ec,
Enabled <has_shutdown <this_layer_type,
error_code (shutdown_type, error_code&)> > ());
}
error_code shutdown (shutdown_type what, error_code& ec,
std::true_type)
{
return m_object.shutdown (what, ec);
}
error_code shutdown (shutdown_type, error_code& ec,
std::false_type)
{
return pure_virtual_error (ec);
}
//--------------------------------------------------------------------------
error_code close (error_code& ec) override
{
return close (ec,
Enabled <has_close <this_layer_type,
error_code (error_code&)> > ());
}
error_code close (error_code& ec,
std::true_type)
{
return m_object.close (ec);
}
error_code close (error_code& ec,
std::false_type)
{
return pure_virtual_error (ec);
}
//--------------------------------------------------------------------------
//
// basic_socket_acceptor
//
//--------------------------------------------------------------------------
// Extracts the underlying socket type from the protocol of another asio object
template <typename T, typename Enable = void>
struct native_socket
{
typedef void* socket_type;
inline native_socket (abstract_socket&)
: m_socket (nullptr)
{
abstract_socket::pure_virtual_called();
}
inline socket_type& get ()
{
abstract_socket::pure_virtual_called();
return m_socket;
}
inline socket_type& operator-> ()
{
return get ();
}
private:
socket_type m_socket;
};
// Enabled if T::protocol_type::socket exists as a type
template <typename T>
struct native_socket <T, typename boost::enable_if <boost::is_class <
typename T::protocol_type::socket> >::type>
{
typedef typename T::protocol_type::socket socket_type;
inline native_socket (abstract_socket& peer)
: m_socket_ptr (&peer.this_layer <socket_type> ())
{
}
inline socket_type& get () noexcept
{
return *m_socket_ptr;
}
inline socket_type& operator-> () noexcept
{
return get ();
}
private:
socket_type* m_socket_ptr;
};
//--------------------------------------------------------------------------
error_code accept (abstract_socket& peer, error_code& ec) override
{
typedef typename native_socket <this_layer_type>::socket_type socket_type;
return accept (peer, ec,
Enabled <has_accept <this_layer_type,
error_code (socket_type&, error_code&)> > ());
}
error_code accept (abstract_socket& peer, error_code& ec,
std::true_type)
{
return m_object.accept (
native_socket <this_layer_type> (peer).get (), ec);
}
error_code accept (abstract_socket&, error_code& ec,
std::false_type)
{
return pure_virtual_error (ec);
}
//--------------------------------------------------------------------------
void async_accept (abstract_socket& peer, error_handler handler) override
{
typedef typename native_socket <this_layer_type>::socket_type socket_type;
async_accept (peer, handler,
Enabled <has_async_accept <this_layer_type,
void (socket_type&, error_handler)> > ());
}
void async_accept (abstract_socket& peer, error_handler const& handler,
std::true_type)
{
m_object.async_accept (
native_socket <this_layer_type> (peer).get (), handler);
}
void async_accept (abstract_socket&, error_handler const& handler,
std::false_type)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error()));
}
//--------------------------------------------------------------------------
//
// basic_stream_socket
//
//--------------------------------------------------------------------------
std::size_t
read_some (mutable_buffers buffers, error_code& ec) override
{
return read_some (buffers, ec,
Enabled <has_read_some <this_layer_type,
std::size_t (mutable_buffers const&, error_code&)> > ());
}
std::size_t
read_some (mutable_buffers const& buffers, error_code& ec,
std::true_type)
{
return m_object.read_some (buffers, ec);
}
std::size_t read_some (mutable_buffers const&, error_code& ec,
std::false_type)
{
ec = pure_virtual_error ();
return 0;
}
//--------------------------------------------------------------------------
std::size_t
write_some (const_buffers buffers, error_code& ec) override
{
return write_some (buffers, ec,
Enabled <has_write_some <this_layer_type,
std::size_t (const_buffers const&, error_code&)> > ());
}
std::size_t
write_some (const_buffers const& buffers, error_code& ec,
std::true_type)
{
return m_object.write_some (buffers, ec);
}
std::size_t
write_some (const_buffers const&, error_code& ec,
std::false_type)
{
ec = pure_virtual_error ();
return 0;
}
//--------------------------------------------------------------------------
void async_read_some (mutable_buffers buffers,
transfer_handler handler) override
{
async_read_some (buffers, handler,
Enabled <has_async_read_some <this_layer_type,
void (mutable_buffers const&, transfer_handler const&)> > ());
}
void
async_read_some (mutable_buffers const& buffers,
transfer_handler const& handler,
std::true_type)
{
m_object.async_read_some (buffers, handler);
}
void
async_read_some (mutable_buffers const&,
transfer_handler const& handler,
std::false_type)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error(), 0));
}
//--------------------------------------------------------------------------
void
async_write_some (const_buffers buffers,
transfer_handler handler) override
{
async_write_some (buffers, handler,
Enabled <has_async_write_some <this_layer_type,
void (const_buffers const&, transfer_handler const&)> > ());
}
void
async_write_some (const_buffers const& buffers,
transfer_handler const& handler,
std::true_type)
{
m_object.async_write_some (buffers, handler);
}
void
async_write_some (const_buffers const&,
transfer_handler const& handler,
std::false_type)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error(), 0));
}
//--------------------------------------------------------------------------
//
// ssl::stream
//
//--------------------------------------------------------------------------
template <class T>
struct has_type_next_layer_type
{
typedef char yes;
typedef struct {char dummy[2];} no;
template <class C> static yes f(typename C::next_layer_type*);
template <class C> static no f(...);
#ifdef _MSC_VER
static bool const value = sizeof(f<T>(0)) == 1;
#else
// This line fails to compile under Visual Studio 2012
static bool const value = sizeof(has_type_next_layer_type<T>::f<T>(0)) == 1;
#endif
};
template <typename T, bool Exists = has_type_next_layer_type <T>::value >
struct extract_next_layer_type
{
typedef typename T::next_layer_type type;
};
template <typename T>
struct extract_next_layer_type <T, false>
{
typedef void type;
};
// This will be void if next_layer_type doesn't exist in Object
typedef typename extract_next_layer_type <this_layer_type>::type next_layer_type;
//--------------------------------------------------------------------------
void* next_layer_ptr (char const* type_name) const override
{
return next_layer_ptr (type_name,
Enabled <has_type_next_layer_type <this_layer_type> > ());
}
void* next_layer_ptr (char const* type_name,
std::true_type) const
{
char const* const name (typeid (typename this_layer_type::next_layer_type).name ());
if (strcmp (name, type_name) == 0)
return const_cast <void*> (static_cast <void const*> (&m_object.next_layer ()));
return nullptr;
}
void* next_layer_ptr (char const*,
std::false_type) const
{
pure_virtual_called();
return nullptr;
}
//--------------------------------------------------------------------------
bool needs_handshake () override
{
return
has_handshake <this_layer_type,
error_code (handshake_type, error_code&)>::value ||
has_async_handshake <this_layer_type,
void (handshake_type, error_handler)>::value;
}
//--------------------------------------------------------------------------
void set_verify_mode (int verify_mode) override
{
set_verify_mode (verify_mode,
Enabled <has_set_verify_mode <this_layer_type,
void (int)> > ());
}
void set_verify_mode (int verify_mode,
std::true_type)
{
m_object.set_verify_mode (verify_mode);
}
void set_verify_mode (int,
std::false_type)
{
pure_virtual_called();
}
//--------------------------------------------------------------------------
error_code
handshake (handshake_type type, error_code& ec) override
{
return handshake (type, ec,
Enabled <has_handshake <this_layer_type,
error_code (handshake_type, error_code&)> > ());
}
error_code
handshake (handshake_type type, error_code& ec,
std::true_type)
{
return m_object.handshake (type, ec);
}
error_code
handshake (handshake_type, error_code& ec,
std::false_type)
{
return pure_virtual_error (ec);
}
//--------------------------------------------------------------------------
void async_handshake (handshake_type type, error_handler handler) override
{
async_handshake (type, handler,
Enabled <has_async_handshake <this_layer_type,
void (handshake_type, error_handler)> > ());
}
void async_handshake (handshake_type type, error_handler const& handler,
std::true_type)
{
m_object.async_handshake (type, handler);
}
void async_handshake (handshake_type, error_handler const& handler,
std::false_type)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error()));
}
//--------------------------------------------------------------------------
error_code
handshake (handshake_type type, const_buffers buffers,
error_code& ec) override
{
return handshake (type, buffers, ec,
Enabled <has_handshake <this_layer_type,
error_code (handshake_type, const_buffers const&, error_code&)> > ());
}
error_code
handshake (handshake_type type, const_buffers const& buffers,
error_code& ec,
std::true_type)
{
return m_object.handshake (type, buffers, ec);
}
error_code
handshake (handshake_type, const_buffers const&,
error_code& ec,
std::false_type)
{
return pure_virtual_error (ec);
}
//--------------------------------------------------------------------------
void async_handshake (handshake_type type,
const_buffers buffers, transfer_handler handler) override
{
async_handshake (type, buffers, handler,
Enabled <has_async_handshake <this_layer_type,
void (handshake_type, const_buffers const&,
transfer_handler)> > ());
}
void async_handshake (handshake_type type, const_buffers const& buffers,
transfer_handler const& handler,
std::true_type)
{
m_object.async_handshake (type, buffers, handler);
}
void async_handshake (handshake_type, const_buffers const&,
transfer_handler const& handler,
std::false_type)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error(), 0));
}
//--------------------------------------------------------------------------
error_code shutdown (error_code& ec) override
{
return shutdown (ec,
Enabled <has_shutdown <this_layer_type,
error_code (error_code&)> > ());
}
error_code shutdown (error_code& ec,
std::true_type)
{
return m_object.shutdown (ec);
}
error_code shutdown (error_code& ec,
std::false_type)
{
return pure_virtual_error (ec);
}
//--------------------------------------------------------------------------
void async_shutdown (error_handler handler) override
{
async_shutdown (handler,
Enabled <has_async_shutdown <this_layer_type,
void (error_handler)> > ());
}
void async_shutdown (error_handler const& handler,
std::true_type)
{
m_object.async_shutdown (handler);
}
void async_shutdown (error_handler const& handler,
std::false_type)
{
get_io_service ().post (bind_handler (
handler, pure_virtual_error()));
}
};
}
}
#endif
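For orientation, a hedged usage sketch of the wrapper removed above: socket_wrapper forwards its constructor arguments to the wrapped Object, exposes the object through this_layer(), and dispatches the abstract_socket interface to the object's members when they exist. The header path is assumed from the include guard, and the snippet only exercises calls visible in the listing:
#include <beast/asio/socket_wrapper.h>   // path assumed from BEAST_ASIO_SOCKET_WRAPPER_H_INCLUDED
#include <boost/asio.hpp>
void sketch (boost::asio::io_service& ios)
{
    // Owns a tcp::socket constructed from the forwarded io_service.
    beast::asio::socket_wrapper <boost::asio::ip::tcp::socket> wrapped (ios);
    // Direct, typed access to the wrapped object.
    boost::asio::ip::tcp::socket& sock = wrapped.this_layer ();
    (void) sock;
    // Type-erased access through the abstract_socket base;
    // unsupported operations behave like a pure virtual call.
    beast::asio::abstract_socket& as = wrapped;
    boost::system::error_code ec;
    as.close (ec);
}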

View File

@@ -17,20 +17,23 @@
*/
//==============================================================================
#ifndef BEAST_HTTP_GET_H_INCLUDED
#define BEAST_HTTP_GET_H_INCLUDED
#ifndef BEAST_ASIO_SSL_H_INCLUDED
#define BEAST_ASIO_SSL_H_INCLUDED
#include <boost/asio/ssl/error.hpp>
#include <boost/system/error_code.hpp>
#include <string>
#include <utility>
namespace beast {
namespace http {
namespace asio {
/** Perform simple HTTP GET to retrieve a resource as a string. */
std::pair <std::string, boost::system::error_code>
get (std::string const& url_string);
/** Returns `true` if the error code is an SSL "short read." */
inline
bool
is_short_read (boost::system::error_code const& ec)
{
return (ec.category() == boost::asio::error::get_ssl_category())
&& (ERR_GET_REASON(ec.value()) == SSL_R_SHORT_READ);
}
}
}
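is_short_read is typically consulted in a read completion handler, where an SSL "short read" (the peer closing the connection without a proper SSL shutdown) can be treated like an ordinary end of stream. A hedged sketch, with the header path assumed from the include guard and the application hooks left hypothetical:
#include <beast/asio/ssl.h>   // path assumed from BEAST_ASIO_SSL_H_INCLUDED
#include <boost/asio/error.hpp>
#include <boost/system/error_code.hpp>
#include <cstddef>
void on_read (boost::system::error_code const& ec, std::size_t bytes_transferred)
{
    if (ec)
    {
        if (ec == boost::asio::error::eof ||
            beast::asio::is_short_read (ec))
        {
            // Treat an abrupt SSL truncation the same as a clean EOF.
            // handle_eof (bytes_transferred);   // hypothetical application hook
            return;
        }
        // handle_error (ec);                    // hypothetical application hook
        return;
    }
    // consume (bytes_transferred);              // hypothetical application hook
}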

View File

@@ -17,36 +17,41 @@
*/
//==============================================================================
#ifndef BEAST_ASIO_TESTS_TESTPEERDETAILS_H_INCLUDED
#define BEAST_ASIO_TESTS_TESTPEERDETAILS_H_INCLUDED
#ifndef BEAST_ASIO_SSL_BUNDLE_H_INCLUDED
#define BEAST_ASIO_SSL_BUNDLE_H_INCLUDED
#include <beast/asio/abstract_socket.h>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <utility>
namespace beast {
namespace asio {
/** Base class of all detail objects. */
class TestPeerDetails
/** Work-around for non-movable boost::ssl::stream.
This allows ssl::stream to be movable and allows the stream to
construct from an already-existing socket.
*/
struct ssl_bundle
{
public:
virtual ~TestPeerDetails () { }
typedef boost::asio::ip::tcp::socket socket_type;
typedef boost::asio::ssl::stream <socket_type&> stream_type;
virtual String name () const = 0;
template <class... Args>
ssl_bundle (boost::asio::ssl::context& context, Args&&... args);
virtual abstract_socket& get_socket () = 0;
virtual abstract_socket& get_acceptor () = 0;
boost::asio::io_service& get_io_service ()
{
return m_io_service;
}
private:
boost::asio::io_service m_io_service;
socket_type socket;
stream_type stream;
};
}
template <class... Args>
ssl_bundle::ssl_bundle (boost::asio::ssl::context& context,
Args&&... args)
: socket(std::forward<Args>(args)...)
, stream (socket, context)
{
}
} // asio
} // beast
#endif
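A hedged usage sketch of ssl_bundle: everything after the context is forwarded to the underlying tcp::socket, and the stream member layers SSL over a reference to that socket. Member access is assumed to be public, as the struct layout suggests:
#include <beast/asio/ssl_bundle.h>   // path assumed from BEAST_ASIO_SSL_BUNDLE_H_INCLUDED
#include <boost/asio/io_service.hpp>
#include <boost/asio/ssl.hpp>
void sketch ()
{
    boost::asio::io_service ios;
    boost::asio::ssl::context ctx (boost::asio::ssl::context::sslv23);
    // socket is constructed from (ios); stream is constructed from (socket, ctx).
    beast::asio::ssl_bundle bundle (ctx, ios);
    // A real client would connect bundle.socket before handshaking, and would
    // typically hold the bundle behind a unique_ptr so ownership can move even
    // though ssl::stream itself cannot.
    boost::system::error_code ec;
    bundle.stream.handshake (boost::asio::ssl::stream_base::client, ec);
}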

View File

@@ -1,105 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#if BEAST_INCLUDE_BEASTCONFIG
#include <BeastConfig.h>
#endif
#include <beast/unit_test/suite.h>
#include <beast/asio/bind_handler.h>
#include <beast/asio/enable_wait_for_async.h>
#include <boost/asio/io_service.hpp>
namespace beast {
class enable_wait_for_async_test : public unit_test::suite
{
public:
typedef boost::system::error_code error_code;
void test()
{
struct handler
{
void operator()(error_code)
{
}
};
struct owner : asio::enable_wait_for_async <owner>
{
bool notified;
owner()
: notified (false)
{
}
void operator()()
{
{
boost::asio::io_service ios;
ios.post (asio::bind_handler (handler(),
error_code()));
ios.run();
ios.reset();
wait_for_async();
}
{
boost::asio::io_service ios;
ios.post (wrap_with_counter (asio::bind_handler (
handler(), error_code())));
ios.run();
wait_for_async();
}
{
boost::asio::io_service ios;
handler h;
ios.post (wrap_with_counter (std::bind (
&handler::operator(), &h,
error_code())));
ios.run();
wait_for_async();
}
}
void on_wait_for_async()
{
notified = true;
}
};
owner o;
o();
expect (o.notified);
}
void run()
{
test();
}
};
BEAST_DEFINE_TESTSUITE(enable_wait_for_async,asio,beast);
}

View File

@@ -1,235 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#if BEAST_INCLUDE_BEASTCONFIG
#include <BeastConfig.h>
#endif
#include <beast/unit_test/suite.h>
#include <beast/asio/shared_handler.h>
// Disables is_constructible tests for std::function
// Visual Studio std::function fails the is_constructible tests
#ifndef BEAST_NO_STD_FUNCTION_CONSTRUCTIBLE
# ifdef _MSC_VER
# define BEAST_NO_STD_FUNCTION_CONSTRUCTIBLE 1
# else
# define BEAST_NO_STD_FUNCTION_CONSTRUCTIBLE 0
# endif
#endif
namespace beast {
class shared_handler_test : public unit_test::suite
{
public:
struct test_results
{
bool call;
bool invoke;
bool alloc;
bool dealloc;
bool cont;
test_results ()
: call (false)
, invoke (false)
, alloc (false)
, dealloc (false)
, cont (false)
{
}
};
struct test_handler
{
std::reference_wrapper <test_results> results;
explicit test_handler (test_results& results_)
: results (results_)
{
}
void operator() ()
{
results.get().call = true;
}
template <class Function>
friend void asio_handler_invoke (
Function& f, test_handler* h)
{
h->results.get().invoke = true;
f();
}
template <class Function>
friend void asio_handler_invoke (
Function const& f, test_handler* h)
{
h->results.get().invoke = true;
f();
}
friend void* asio_handler_allocate (
std::size_t size, test_handler* h)
{
h->results.get().alloc = true;
return boost::asio::asio_handler_allocate (size);
}
friend void asio_handler_deallocate (
void* p, std::size_t size, test_handler* h)
{
h->results.get().dealloc = true;
boost::asio::asio_handler_deallocate (p, size);
}
friend bool asio_handler_is_continuation (
test_handler* h)
{
h->results.get().cont = true;
return true;
}
};
struct test_invokable
{
bool call;
test_invokable ()
: call (false)
{
}
void operator() ()
{
call = true;
}
};
template <class Handler>
bool async_op (Handler&& handler)
{
void* const p (boost_asio_handler_alloc_helpers::allocate (32, handler));
handler();
boost_asio_handler_alloc_helpers::deallocate (p, 32, handler);
return boost_asio_handler_cont_helpers::is_continuation (handler);
}
void virtual_async_op (asio::shared_handler <void(void)> handler)
{
async_op (handler);
}
void run()
{
#if ! BEAST_NO_STD_FUNCTION_CONSTRUCTIBLE
static_assert (! std::is_constructible <
std::function <void(void)>, int&&>::value,
"Cannot construct std::function from int&&");
static_assert (! std::is_constructible <
std::function <void(void)>, int>::value,
"Cannot construct std::function from int");
static_assert (! std::is_constructible <
asio::shared_handler <void(void)>, int>::value,
"Cannot construct shared_handler from int");
#endif
static_assert (std::is_constructible <
asio::shared_handler <void(int)>,
asio::shared_handler <void(int)>>::value,
"Should construct <void(int)> from <void(int)>");
static_assert (! std::is_constructible <
asio::shared_handler <void(int)>,
asio::shared_handler <void(void)>>::value,
"Can't construct <void(int)> from <void(void)>");
// Hooks called when using the raw handler
{
test_results r;
test_handler h (r);
async_op (h);
expect (r.call);
expect (r.alloc);
expect (r.dealloc);
expect (r.cont);
test_invokable f;
boost_asio_handler_invoke_helpers::invoke (std::ref (f), h);
expect (r.invoke);
expect (f.call);
}
// Use of std::function shows the hooks not getting called
{
test_results r;
std::function <void(void)> fh ((test_handler) (r));
async_op (fh);
expect (r.call);
unexpected (r.alloc);
unexpected (r.dealloc);
unexpected (r.cont);
test_invokable f;
boost_asio_handler_invoke_helpers::invoke (std::ref (f), fh);
unexpected (r.invoke);
expect (f.call);
}
// Make sure shared_handler calls the hooks
{
test_results r;
asio::shared_handler <void(void)> sh ((test_handler)(r));
async_op (sh);
expect (r.call);
expect (r.alloc);
expect (r.dealloc);
expect (r.cont);
test_invokable f;
boost_asio_handler_invoke_helpers::invoke (std::ref (f), sh);
expect (r.invoke);
expect (f.call);
}
// Make sure shared_handler via implicit conversion calls hooks
{
test_results r;
test_handler h (r);
virtual_async_op ((test_handler) (r));
expect (r.call);
expect (r.alloc);
expect (r.dealloc);
expect (r.cont);
}
}
};
BEAST_DEFINE_TESTSUITE(shared_handler,asio,beast);
}

View File

@@ -1,280 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#if BEAST_INCLUDE_BEASTCONFIG
#include <BeastConfig.h>
#endif
#include <beast/unit_test/suite.h>
#include <beast/asio/wrap_handler.h>
#include <boost/version.hpp>
#include <boost/bind.hpp>
#include <functional>
#include <memory>
namespace beast {
namespace asio {
//------------------------------------------------------------------------------
// Displays the order of destruction of parameters in the bind wrapper
//
class boost_bind_test : public unit_test::suite
{
public:
struct Result
{
std::string text;
void push_back (std::string const& s)
{
if (! text.empty())
text += ", ";
text += s;
}
};
struct Payload
{
std::reference_wrapper <Result> m_result;
std::string m_name;
explicit Payload (Result& result, std::string const& name)
: m_result (result)
, m_name (name)
{
}
~Payload ()
{
m_result.get().push_back (m_name);
}
};
struct Arg
{
std::shared_ptr <Payload> m_payload;
Arg (Result& result, std::string const& name)
: m_payload (std::make_shared <Payload> (result, name))
{
}
};
static void foo (Arg const&, Arg const&, Arg const&)
{
}
void run()
{
{
Result r;
{
boost::bind (&foo,
Arg (r, "one"),
Arg (r, "two"),
Arg (r, "three"));
}
log <<
std::string ("boost::bind (") + r.text + ")";
}
{
Result r;
{
std::bind (&foo,
Arg (r, "one"),
Arg (r, "two"),
Arg (r, "three"));
}
log <<
std::string ("std::bind (") + r.text + ")";
}
pass();
}
};
BEAST_DEFINE_TESTSUITE(boost_bind,asio,beast);
//------------------------------------------------------------------------------
class wrap_handler_test : public unit_test::suite
{
public:
struct test_results
{
bool call;
bool invoke;
bool alloc;
bool dealloc;
bool cont;
test_results ()
: call (false)
, invoke (false)
, alloc (false)
, dealloc (false)
, cont (false)
{
}
};
struct test_handler
{
std::reference_wrapper <test_results> results;
explicit test_handler (test_results& results_)
: results (results_)
{
}
void operator() ()
{
results.get().call = true;
}
template <class Function>
friend void asio_handler_invoke (
Function& f, test_handler* h)
{
h->results.get().invoke = true;
f();
}
template <class Function>
friend void asio_handler_invoke (
Function const& f, test_handler* h)
{
h->results.get().invoke = true;
f();
}
friend void* asio_handler_allocate (
std::size_t, test_handler* h)
{
h->results.get().alloc = true;
return nullptr;
}
friend void asio_handler_deallocate (
void*, std::size_t, test_handler* h)
{
h->results.get().dealloc = true;
}
friend bool asio_handler_is_continuation (
test_handler* h)
{
h->results.get().cont = true;
return true;
}
};
struct test_invokable
{
bool call;
test_invokable ()
: call (false)
{
}
void operator() ()
{
call = true;
}
};
template <class Handler>
bool async_op (Handler&& handler)
{
void* const p (boost_asio_handler_alloc_helpers::allocate (32, handler));
(handler)();
boost_asio_handler_alloc_helpers::deallocate (p, 32, handler);
return boost_asio_handler_cont_helpers::is_continuation (handler);
}
void run()
{
// Hooks called when using the raw handler
{
test_results r;
test_handler h (r);
async_op (h);
expect (r.call);
expect (r.alloc);
expect (r.dealloc);
expect (r.cont);
test_invokable f;
boost_asio_handler_invoke_helpers::invoke (std::ref (f), h);
expect (r.invoke);
expect (f.call);
}
// Use of boost::bind shows the hooks not getting called
{
test_results r;
test_handler h (r);
auto b (std::bind (&test_handler::operator(), &h));
async_op (b);
expect (r.call);
unexpected (r.alloc);
unexpected (r.dealloc);
unexpected (r.cont);
test_invokable f;
boost_asio_handler_invoke_helpers::invoke (std::ref (f), b);
unexpected (r.invoke);
expect (f.call);
}
// Make sure the wrapped handler calls the hooks
{
test_results r;
test_handler h (r);
auto w (wrap_handler (
std::bind (&test_handler::operator(), test_handler(r)), h));
async_op (w);
expect (r.call);
expect (r.alloc);
expect (r.dealloc);
expect (r.cont);
test_invokable f;
boost_asio_handler_invoke_helpers::invoke (std::ref (f), w);
expect (r.invoke);
expect (f.call);
}
}
};
BEAST_DEFINE_TESTSUITE(wrap_handler,asio,beast);
}
}

View File

@@ -1,176 +0,0 @@
//------------------------------------------------------------------------------
/*
This file is part of Beast: https://github.com/vinniefalco/Beast
Copyright 2013, Vinnie Falco <vinnie.falco@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#ifndef BEAST_ASIO_WRAP_HANDLER_H_INCLUDED
#define BEAST_ASIO_WRAP_HANDLER_H_INCLUDED
#include <boost/asio/detail/handler_alloc_helpers.hpp>
#include <boost/asio/detail/handler_cont_helpers.hpp>
#include <boost/asio/detail/handler_invoke_helpers.hpp>
#include <beast/cxx14/type_traits.h> // <type_traits>
#include <utility>
namespace beast {
namespace asio {
#ifdef _MSC_VER
#pragma warning (push)
#pragma warning (disable: 4512) // assignment operator could not be generated
#endif
namespace detail {
/** A handler which wraps another handler using a specific context.
The handler is invoked with the same io_service execution guarantees
as the provided context.
@note A copy of Context is made.
*/
template <class Handler, class Context>
class wrapped_handler
{
private:
Handler m_handler;
Context m_context;
bool m_continuation;
// If this goes off, consider carefully what the intent is.
static_assert (! std::is_reference <Handler>::value,
"Handler should not be a reference type");
public:
wrapped_handler (bool continuation, Handler&& handler, Context context)
: m_handler (std::move (handler))
, m_context (context)
, m_continuation (continuation ? true :
boost_asio_handler_cont_helpers::is_continuation (context))
{
}
wrapped_handler (bool continuation, Handler const& handler, Context context)
: m_handler (handler)
, m_context (context)
, m_continuation (continuation ? true :
boost_asio_handler_cont_helpers::is_continuation (context))
{
}
template <class... Args>
void
operator() (Args&&... args)
{
m_handler (std::forward <Args> (args)...);
}
template <class... Args>
void
operator() (Args&&... args) const
{
m_handler (std::forward <Args> (args)...);
}
template <class Function>
friend
void
asio_handler_invoke (Function& f, wrapped_handler* h)
{
boost_asio_handler_invoke_helpers::
invoke (f, h->m_context);
}
template <class Function>
friend
void
asio_handler_invoke (Function const& f, wrapped_handler* h)
{
boost_asio_handler_invoke_helpers::
invoke (f, h->m_context);
}
friend
void*
asio_handler_allocate (std::size_t size, wrapped_handler* h)
{
return boost_asio_handler_alloc_helpers::
allocate (size, h->m_context);
}
friend
void
asio_handler_deallocate (void* p, std::size_t size, wrapped_handler* h)
{
boost_asio_handler_alloc_helpers::
deallocate (p, size, h->m_context);
}
friend
bool
asio_handler_is_continuation (wrapped_handler* h)
{
return h->m_continuation;
}
};
}
//------------------------------------------------------------------------------
// Tag for dispatching wrap_handler with is_continuation == true
enum continuation_t
{
continuation
};
/** Returns a wrapped handler so it executes within another context.
The handler is invoked with the same io_service execution guarantees
as the provided context. The handler will be copied if necessary.
@note A copy of Context is made.
*/
/** @{ */
template <class DeducedHandler, class Context>
detail::wrapped_handler <
std::remove_reference_t <DeducedHandler>,
Context
>
wrap_handler (DeducedHandler&& handler, Context const& context,
bool continuation = false)
{
typedef std::remove_reference_t <DeducedHandler> Handler;
return detail::wrapped_handler <Handler, Context> (continuation,
std::forward <DeducedHandler> (handler), context);
}
template <class DeducedHandler, class Context>
detail::wrapped_handler <
std::remove_reference_t <DeducedHandler>,
Context
>
wrap_handler (continuation_t, DeducedHandler&& handler,
Context const& context)
{
typedef std::remove_reference_t <DeducedHandler> Handler;
return detail::wrapped_handler <Handler, Context> (true,
std::forward <DeducedHandler> (handler), context);
}
/** @} */
}
}
#endif
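A hedged usage sketch of wrap_handler from the header above: an intermediate handler (here a lambda) is wrapped with the caller's original handler as the context, so it inherits that handler's invocation, allocation, and continuation hooks; the continuation_t overload marks the wrapper as a continuation explicitly:
#include <beast/asio/wrap_handler.h>   // path assumed from BEAST_ASIO_WRAP_HANDLER_H_INCLUDED
#include <boost/asio/io_service.hpp>
template <class Handler>
void run_under_handler_context (boost::asio::io_service& ios, Handler const& original)
{
    // The lambda executes with the hooks of 'original' rather than its own.
    ios.post (beast::asio::wrap_handler (
        [] { /* intermediate work */ },
        original));
    // Same, but unconditionally reported as a continuation.
    ios.post (beast::asio::wrap_handler (beast::asio::continuation,
        [] { /* follow-up work */ },
        original));
}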

View File

@@ -256,7 +256,7 @@ std::string RelativeTime::to_string () const
}
}
#if BEAST_WINDOWS
#include <windows.h>
@@ -266,12 +266,12 @@ namespace detail {
static double monotonicCurrentTimeInSeconds()
{
return GetTickCount64() / 1000.0;
return GetTickCount64() / 1000.0;
}
}
}
#elif BEAST_MAC || BEAST_IOS
#include <mach/mach_time.h>
@@ -279,39 +279,39 @@ static double monotonicCurrentTimeInSeconds()
namespace beast {
namespace detail {
static double monotonicCurrentTimeInSeconds()
{
struct StaticInitializer
{
StaticInitializer ()
{
double numerator;
double denominator;
struct StaticInitializer
{
StaticInitializer ()
{
double numerator;
double denominator;
mach_timebase_info_data_t timebase;
(void) mach_timebase_info (&timebase);
if (timebase.numer % 1000000 == 0)
{
numerator = timebase.numer / 1000000.0;
denominator = timebase.denom * 1000.0;
}
else
{
numerator = timebase.numer;
mach_timebase_info_data_t timebase;
(void) mach_timebase_info (&timebase);
if (timebase.numer % 1000000 == 0)
{
numerator = timebase.numer / 1000000.0;
denominator = timebase.denom * 1000.0;
}
else
{
numerator = timebase.numer;
// VFALCO NOTE I don't understand this code
//denominator = timebase.denom * (std::uint64_t) 1000000 * 1000.0;
//denominator = timebase.denom * (std::uint64_t) 1000000 * 1000.0;
denominator = timebase.denom * 1000000000.0;
}
ratio = numerator / denominator;
}
}
ratio = numerator / denominator;
}
double ratio;
};
static StaticInitializer const data;
double ratio;
};
static StaticInitializer const data;
return mach_absolute_time() * data.ratio;
}
@@ -319,7 +319,7 @@ static double monotonicCurrentTimeInSeconds()
}
#else
#include <time.h>
namespace beast {
@@ -327,14 +327,14 @@ namespace detail {
static double monotonicCurrentTimeInSeconds()
{
timespec t;
clock_gettime (CLOCK_MONOTONIC, &t);
return t.tv_sec + t.tv_nsec / 1000000000.0;
timespec t;
clock_gettime (CLOCK_MONOTONIC, &t);
return t.tv_sec + t.tv_nsec / 1000000000.0;
}
}
}
#endif
namespace beast {
@@ -343,37 +343,37 @@ namespace detail {
// Records and returns the time from process startup
static double getStartupTime()
{
struct StaticInitializer
{
StaticInitializer ()
{
struct StaticInitializer
{
StaticInitializer ()
{
when = detail::monotonicCurrentTimeInSeconds();
}
double when;
};
};
static StaticInitializer const data;
static StaticInitializer const data;
return data.when;
return data.when;
}
// Used to call getStartupTime as early as possible
struct StartupTimeStaticInitializer
{
StartupTimeStaticInitializer ()
{
StartupTimeStaticInitializer ()
{
getStartupTime();
}
};
static StartupTimeStaticInitializer startupTimeStaticInitializer;
}
RelativeTime RelativeTime::fromStartup ()
{
return RelativeTime (
return RelativeTime (
detail::monotonicCurrentTimeInSeconds() - detail::getStartupTime());
}

View File

@@ -109,9 +109,12 @@ extern void beast_reportFatalError (char const* message, char const* fileName, i
/** Writes a string to the standard error stream.
This is only compiled in a debug build.
@see Logger::outputDebugString
*/
#define BDBG(dbgtext) { beast::String tempDbgBuf; tempDbgBuf << dbgtext; beast::Logger::outputDebugString (tempDbgBuf); }
#define BDBG(dbgtext) { \
beast::String tempDbgBuf; \
tempDbgBuf << dbgtext; \
beast::outputDebugString (tempDbgBuf.toStdString ()); \
}
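A hedged usage example of the rewritten macro: the argument is streamed into a temporary beast::String, so insertion-style expressions work directly, and output appears only in debug builds per the doc comment:
void report_pending (int pending)
{
    BDBG ("pending requests = " << pending);
}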
#if 0
/** This will always cause an assertion failure.

View File

@@ -22,6 +22,7 @@
#endif
#include <beast/container/impl/spookyv2.cpp>
#include <beast/container/impl/siphash.cpp>
#include <beast/container/tests/aged_associative_container.test.cpp>
#include <beast/container/tests/buffer_view.test.cpp>

View File

@@ -141,154 +141,154 @@ Here is a short example demonstrating its use.
\snippet cyclic_iterator.cpp cyclic_iterator
*/
template<
typename ContainerIterator
typename ContainerIterator
>
class cyclic_iterator
:
public detail::cyclic_iterator_base<
ContainerIterator
>::type
public detail::cyclic_iterator_base<
ContainerIterator
>::type
{
public:
/**
\brief The base type which is a <code>boost::iterator_facade</code>
*/
typedef typename detail::cyclic_iterator_base<
ContainerIterator
>::type base_type;
/**
\brief The base type which is a <code>boost::iterator_facade</code>
*/
typedef typename detail::cyclic_iterator_base<
ContainerIterator
>::type base_type;
/**
\brief The underlying iterator type
*/
typedef ContainerIterator container_iterator_type;
/**
\brief The underlying iterator type
*/
typedef ContainerIterator container_iterator_type;
/**
\brief The value type adapted from \a ContainerIterator
*/
typedef typename base_type::value_type value_type;
/**
\brief The value type adapted from \a ContainerIterator
*/
typedef typename base_type::value_type value_type;
/**
\brief The reference type adapted from \a ContainerIterator
*/
typedef typename base_type::reference reference;
/**
\brief The reference type adapted from \a ContainerIterator
*/
typedef typename base_type::reference reference;
/**
\brief The pointer type adapted from \a ContainerIterator
*/
typedef typename base_type::pointer pointer;
/**
\brief The pointer type adapted from \a ContainerIterator
*/
typedef typename base_type::pointer pointer;
/**
\brief The difference type adapted from \a ContainerIterator
*/
typedef typename base_type::difference_type difference_type;
/**
\brief The difference type adapted from \a ContainerIterator
*/
typedef typename base_type::difference_type difference_type;
/**
\brief The iterator category, either Forward or Bidirectional
*/
typedef typename base_type::iterator_category iterator_category;
/**
\brief The iterator category, either Forward or Bidirectional
*/
typedef typename base_type::iterator_category iterator_category;
/**
\brief Creates a singular iterator
*/
cyclic_iterator();
/**
\brief Creates a singular iterator
*/
cyclic_iterator();
/**
\brief Copy constructs from another cyclic iterator
/**
\brief Copy constructs from another cyclic iterator
Copy constructs from another cyclic iterator \a other. This only works
if the underlying iterators are convertible.
Copy constructs from another cyclic iterator \a other. This only works
if the underlying iterators are convertible.
\param other The iterator to copy construct from
*/
template<
typename OtherIterator
>
explicit
cyclic_iterator(
cyclic_iterator<OtherIterator> const &other
);
\param other The iterator to copy construct from
*/
template<
typename OtherIterator
>
explicit
cyclic_iterator(
cyclic_iterator<OtherIterator> const &other
);
/**
\brief Constructs a new cyclic iterator
/**
\brief Constructs a new cyclic iterator
Constructs a new cyclic iterator, starting at \a it, inside
a range from \a begin to \a end.
Constructs a new cyclic iterator, starting at \a it, inside
a range from \a begin to \a end.
\param pos The start of the iterator
\param begin The beginning of the range
\param end The end of the range
\param pos The start of the iterator
\param begin The beginning of the range
\param end The end of the range
\warning The behaviour is undefined if \a pos isn't between \a begin
and \a end. Also, the behaviour is undefined, if \a begin and \a end
don't form a valid range.
*/
cyclic_iterator(
container_iterator_type const &pos,
container_iterator_type const &begin,
container_iterator_type const &end
);
\warning The behaviour is undefined if \a pos isn't between \a begin
and \a end. Also, the behaviour is undefined, if \a begin and \a end
don't form a valid range.
*/
cyclic_iterator(
container_iterator_type const &pos,
container_iterator_type const &begin,
container_iterator_type const &end
);
/**
\brief Assigns from another cyclic iterator
/**
\brief Assigns from another cyclic iterator
Assigns from another cyclic iterator \a other. This only works if the
underlying iterators are convertible.
Assigns from another cyclic iterator \a other. This only works if the
underlying iterators are convertible.
\param other The iterator to assign from
\param other The iterator to assign from
\return <code>*this</code>
*/
template<
typename OtherIterator
>
cyclic_iterator<ContainerIterator> &
operator=(
cyclic_iterator<OtherIterator> const &other
);
\return <code>*this</code>
*/
template<
typename OtherIterator
>
cyclic_iterator<ContainerIterator> &
operator=(
cyclic_iterator<OtherIterator> const &other
);
/**
\brief Returns the beginning of the range
*/
container_iterator_type
begin() const;
/**
\brief Returns the beginning of the range
*/
container_iterator_type
begin() const;
/**
\brief Returns the end of the range
*/
container_iterator_type
end() const;
/**
\brief Returns the end of the range
*/
container_iterator_type
end() const;
/**
\brief Returns the underlying iterator
*/
container_iterator_type
get() const;
/**
\brief Returns the underlying iterator
*/
container_iterator_type
get() const;
private:
friend class boost::iterator_core_access;
friend class boost::iterator_core_access;
void
increment();
void
increment();
void
decrement();
void
decrement();
bool
equal(
cyclic_iterator const &
) const;
bool
equal(
cyclic_iterator const &
) const;
reference
dereference() const;
reference
dereference() const;
difference_type
distance_to(
cyclic_iterator const &
) const;
difference_type
distance_to(
cyclic_iterator const &
) const;
private:
container_iterator_type
it_,
begin_,
end_;
container_iterator_type
it_,
begin_,
end_;
};
//

View File

@@ -37,7 +37,7 @@ namespace detail {
template <
bool is_const,
class Iterator,
class Base =
class Base =
std::iterator <
typename std::iterator_traits <Iterator>::iterator_category,
typename std::conditional <is_const,
@@ -51,20 +51,46 @@ class aged_container_iterator
public:
typedef typename Iterator::value_type::stashed::time_point time_point;
// Could be '= default', but Visual Studio 2013 chokes on it [Aug 2014]
aged_container_iterator ()
{
}
template <class OtherIterator, class OtherBase>
aged_container_iterator (aged_container_iterator <
false, OtherIterator, OtherBase> const& other)
// copy constructor
aged_container_iterator (
aged_container_iterator<is_const, Iterator, Base>
const& other) = default;
// Disable constructing a const_iterator from a non-const_iterator.
// Converting between reverse and non-reverse iterators should be explicit.
template <bool other_is_const, class OtherIterator, class OtherBase,
class = typename std::enable_if <
(other_is_const == false || is_const == true) &&
std::is_same<Iterator, OtherIterator>::value == false>::type>
explicit aged_container_iterator (aged_container_iterator <
other_is_const, OtherIterator, OtherBase> const& other)
: m_iter (other.m_iter)
{
}
// Disable constructing a const_iterator from a non-const_iterator.
template <bool other_is_const, class OtherBase,
class = typename std::enable_if <
other_is_const == false || is_const == true>::type>
aged_container_iterator (aged_container_iterator <
other_is_const, Iterator, OtherBase> const& other)
: m_iter (other.m_iter)
{
}
// Disable assigning a const_iterator to a non-const iterator
template <bool other_is_const, class OtherIterator, class OtherBase>
aged_container_iterator& operator= (aged_container_iterator <
other_is_const, OtherIterator, OtherBase> const& other)
auto
operator= (aged_container_iterator <
other_is_const, OtherIterator, OtherBase> const& other) ->
typename std::enable_if <
other_is_const == false || is_const == true,
aged_container_iterator&>::type
{
m_iter = other.m_iter;
return *this;
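The converting constructors above use a defaulted enable_if template parameter so that an iterator converts to a const_iterator but never the reverse, and so that conversions between reverse and non-reverse iterators must be explicit. A minimal standalone sketch of the constness gate, with hypothetical names:
#include <type_traits>
template <bool IsConst>
struct iter_sketch
{
    iter_sketch() = default;
    // Allowed: same constness, or non-const -> const.
    // Disallowed (removed by SFINAE): const -> non-const.
    template <bool OtherIsConst,
        class = typename std::enable_if <
            OtherIsConst == false || IsConst == true>::type>
    iter_sketch (iter_sketch<OtherIsConst> const&)
    {
    }
};
int main()
{
    iter_sketch<false> it;              // "iterator"
    iter_sketch<true> cit (it);         // iterator -> const_iterator: compiles
    // iter_sketch<false> bad (cit);    // const_iterator -> iterator: would not compile
    (void) cit;
}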

View File

@@ -42,6 +42,18 @@
namespace beast {
namespace detail {
// Traits templates used to discern reverse_iterators, which are disallowed
// for mutating operations.
template <class It>
struct is_boost_reverse_iterator
: std::false_type
{};
template <class It>
struct is_boost_reverse_iterator<boost::intrusive::detail::reverse_iterator<It>>
: std::true_type
{};
/** Associative container where each element is also indexed by time.
This container mirrors the interface of the standard library ordered
@@ -203,7 +215,7 @@ private:
{
return this->member() (k, extract (e.value));
}
template <class K>
bool operator() (element const& e, K const& k) const
{
@@ -215,7 +227,7 @@ private:
{
return this->member() (k, extract (e.value));
}
bool operator() (element const& e, Key const& k) const
{
return this->member() (extract (e.value), k);
@@ -377,20 +389,36 @@ private:
template <class... Args>
element* new_element (Args&&... args)
{
element* const p (
ElementAllocatorTraits::allocate (m_config.alloc(), 1));
struct Deleter
{
std::reference_wrapper <ElementAllocator> a_;
Deleter (ElementAllocator& a)
: a_(a)
{
}
void
operator()(element* p)
{
ElementAllocatorTraits::deallocate (a_.get(), p, 1);
}
};
std::unique_ptr <element, Deleter> p (ElementAllocatorTraits::allocate (
m_config.alloc(), 1), Deleter(m_config.alloc()));
ElementAllocatorTraits::construct (m_config.alloc(),
p, clock().now(), std::forward <Args> (args)...);
return p;
p.get(), clock().now(), std::forward <Args> (args)...);
return p.release();
}
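The rewritten new_element above guards the freshly allocated node with a std::unique_ptr so the memory is reclaimed if construction throws, and releases ownership only after construct succeeds. A standalone sketch of the same RAII pattern, assuming an allocator with plain pointers such as std::allocator (names hypothetical, not the container's actual helper):
#include <memory>
#include <utility>
template <class Alloc, class... Args>
typename std::allocator_traits<Alloc>::pointer
allocate_and_construct (Alloc& a, Args&&... args)
{
    using Traits = std::allocator_traits<Alloc>;
    struct Deleter
    {
        Alloc* a_;
        void operator() (typename Traits::pointer p) const
        {
            Traits::deallocate (*a_, p, 1);
        }
    };
    std::unique_ptr <typename Traits::value_type, Deleter> guard (
        Traits::allocate (a, 1), Deleter{&a});
    // If construct throws, 'guard' deallocates; otherwise ownership
    // is handed to the caller.
    Traits::construct (a, guard.get(), std::forward<Args> (args)...);
    return guard.release();
}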
void delete_element (element* p)
void delete_element (element const* p)
{
ElementAllocatorTraits::destroy (m_config.alloc(), p);
ElementAllocatorTraits::deallocate (m_config.alloc(), p, 1);
ElementAllocatorTraits::deallocate (
m_config.alloc(), const_cast<element*>(p), 1);
}
void unlink_and_delete_element (element* p)
void unlink_and_delete_element (element const* p)
{
chronological.list.erase (
chronological.list.iterator_to (*p));
@@ -412,11 +440,13 @@ public:
typedef typename std::allocator_traits <
Allocator>::const_pointer const_pointer;
typedef detail::aged_container_iterator <false,
// A set (that is, !IsMap) iterator is always const because the elements
// of a set are immutable.
typedef detail::aged_container_iterator <!IsMap,
typename cont_type::iterator> iterator;
typedef detail::aged_container_iterator <true,
typename cont_type::iterator> const_iterator;
typedef detail::aged_container_iterator <false,
typedef detail::aged_container_iterator <!IsMap,
typename cont_type::reverse_iterator> reverse_iterator;
typedef detail::aged_container_iterator <true,
typename cont_type::reverse_iterator> const_reverse_iterator;
@@ -433,11 +463,13 @@ public:
class chronological_t
{
public:
typedef detail::aged_container_iterator <false,
// A set (that is, !IsMap) iterator is always const because the elements
// of a set are immutable.
typedef detail::aged_container_iterator <!IsMap,
typename list_type::iterator> iterator;
typedef detail::aged_container_iterator <true,
typename list_type::iterator> const_iterator;
typedef detail::aged_container_iterator <false,
typedef detail::aged_container_iterator <!IsMap,
typename list_type::reverse_iterator> reverse_iterator;
typedef detail::aged_container_iterator <true,
typename list_type::reverse_iterator> const_reverse_iterator;
@@ -823,7 +855,7 @@ public:
template <bool maybe_multi = IsMulti>
typename std::enable_if <maybe_multi,
iterator>::type
insert (const_iterator const& /*hint*/, value_type const& value)
insert (const_iterator /*hint*/, value_type const& value)
{
// VFALCO TODO Figure out how to utilize 'hint'
return insert (value);
@@ -840,7 +872,7 @@ public:
template <bool maybe_multi = IsMulti>
typename std::enable_if <maybe_multi,
iterator>::type
insert (const_iterator const& /*hint*/, value_type&& value)
insert (const_iterator /*hint*/, value_type&& value)
{
// VFALCO TODO Figure out how to utilize 'hint'
return insert (std::move (value));
@@ -882,7 +914,7 @@ public:
template <class InputIt>
void
insert (InputIt first, InputIt const& last)
insert (InputIt first, InputIt last)
{
for (; first != last; ++first)
insert (cend(), *first);
@@ -911,7 +943,7 @@ public:
// map, set
template <bool maybe_multi = IsMulti, class... Args>
auto
emplace_hint (const_iterator const& hint, Args&&... args) ->
emplace_hint (const_iterator hint, Args&&... args) ->
typename std::enable_if <! maybe_multi,
std::pair <iterator, bool>>::type;
@@ -919,24 +951,26 @@ public:
template <bool maybe_multi = IsMulti, class... Args>
typename std::enable_if <maybe_multi,
iterator>::type
emplace_hint (const_iterator const& /*hint*/, Args&&... args)
emplace_hint (const_iterator /*hint*/, Args&&... args)
{
// VFALCO TODO Figure out how to utilize 'hint'
return emplace <maybe_multi> (
std::forward <Args> (args)...);
}
template <bool is_const, class Iterator, class Base>
// enable_if prevents erase (reverse_iterator pos) from compiling
template <bool is_const, class Iterator, class Base,
class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
detail::aged_container_iterator <false, Iterator, Base>
erase (detail::aged_container_iterator <
is_const, Iterator, Base> const& pos);
erase (detail::aged_container_iterator <is_const, Iterator, Base> pos);
template <bool is_const, class Iterator, class Base>
// enable_if prevents erase (reverse_iterator first, reverse_iterator last)
// from compiling
template <bool is_const, class Iterator, class Base,
class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
detail::aged_container_iterator <false, Iterator, Base>
erase (detail::aged_container_iterator <
is_const, Iterator, Base> first,
detail::aged_container_iterator <
is_const, Iterator, Base> const& last);
erase (detail::aged_container_iterator <is_const, Iterator, Base> first,
detail::aged_container_iterator <is_const, Iterator, Base> last);
template <class K>
auto
@@ -948,10 +982,11 @@ public:
//--------------------------------------------------------------------------
template <bool is_const, class Iterator, class Base>
// enable_if prevents touch (reverse_iterator pos) from compiling
template <bool is_const, class Iterator, class Base,
class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
void
touch (detail::aged_container_iterator <
is_const, Iterator, Base> const& pos)
touch (detail::aged_container_iterator <is_const, Iterator, Base> pos)
{
touch (pos, clock().now());
}
@@ -1047,7 +1082,7 @@ public:
const_iterator
upper_bound (K const& k) const
{
return const_iterator (m_cont.upper_bound (k,
return const_iterator (m_cont.upper_bound (k,
std::cref (m_config.key_compare())));
}
@@ -1176,10 +1211,12 @@ public:
}
private:
template <bool is_const, class Iterator, class Base>
// enable_if prevents erase (reverse_iterator pos, now) from compiling
template <bool is_const, class Iterator, class Base,
class = std::enable_if_t<!is_boost_reverse_iterator<Iterator>::value>>
void
touch (detail::aged_container_iterator <
is_const, Iterator, Base> const& pos,
is_const, Iterator, Base> pos,
typename clock_type::time_point const& now);
template <bool maybe_propagate = std::allocator_traits <
@@ -1466,8 +1503,8 @@ operator[] (Key const& key)
element* const p (new_element (
std::piecewise_construct, std::forward_as_tuple (key),
std::forward_as_tuple ()));
chronological.list.push_back (*p);
m_cont.insert_commit (*p, d);
chronological.list.push_back (*p);
return p->value.second;
}
return result.first->value.second;
@@ -1489,8 +1526,8 @@ operator[] (Key&& key)
std::piecewise_construct,
std::forward_as_tuple (std::move (key)),
std::forward_as_tuple ()));
chronological.list.push_back (*p);
m_cont.insert_commit (*p, d);
chronological.list.push_back (*p);
return p->value.second;
}
return result.first->value.second;
@@ -1527,8 +1564,8 @@ insert (value_type const& value) ->
if (result.second)
{
element* const p (new_element (value));
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return std::make_pair (iterator (iter), true);
}
return std::make_pair (iterator (result.first), false);
@@ -1568,8 +1605,8 @@ insert (value_type&& value) ->
if (result.second)
{
element* const p (new_element (std::move (value)));
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return std::make_pair (iterator (iter), true);
}
return std::make_pair (iterator (result.first), false);
@@ -1611,8 +1648,8 @@ insert (const_iterator hint, value_type const& value) ->
if (result.second)
{
element* const p (new_element (value));
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return iterator (iter);
}
return iterator (result.first);
@@ -1634,8 +1671,8 @@ insert (const_iterator hint, value_type&& value) ->
if (result.second)
{
element* const p (new_element (std::move (value)));
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return iterator (iter);
}
return iterator (result.first);
@@ -1660,8 +1697,8 @@ emplace (Args&&... args) ->
std::cref (m_config.key_compare()), d));
if (result.second)
{
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return std::make_pair (iterator (iter), true);
}
delete_element (p);
@@ -1693,7 +1730,7 @@ template <bool IsMulti, bool IsMap, class Key, class T,
template <bool maybe_multi, class... Args>
auto
aged_ordered_container <IsMulti, IsMap, Key, T, Duration, Compare, Allocator>::
emplace_hint (const_iterator const& hint, Args&&... args) ->
emplace_hint (const_iterator hint, Args&&... args) ->
typename std::enable_if <! maybe_multi,
std::pair <iterator, bool>>::type
{
@@ -1706,8 +1743,8 @@ emplace_hint (const_iterator const& hint, Args&&... args) ->
extract (p->value), std::cref (m_config.key_compare()), d));
if (result.second)
{
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return std::make_pair (iterator (iter), true);
}
delete_element (p);
@@ -1716,36 +1753,27 @@ emplace_hint (const_iterator const& hint, Args&&... args) ->
template <bool IsMulti, bool IsMap, class Key, class T,
class Duration, class Compare, class Allocator>
template <bool is_const, class Iterator, class Base>
auto
template <bool is_const, class Iterator, class Base, class>
detail::aged_container_iterator <false, Iterator, Base>
aged_ordered_container <IsMulti, IsMap, Key, T, Duration, Compare, Allocator>::
erase (detail::aged_container_iterator <
is_const, Iterator, Base> const& pos) ->
detail::aged_container_iterator <false, Iterator, Base>
erase (detail::aged_container_iterator <is_const, Iterator, Base> pos)
{
auto iter (pos.iterator());
auto p (&*iter++);
unlink_and_delete_element (p);
unlink_and_delete_element(&*((pos++).iterator()));
return detail::aged_container_iterator <
false, Iterator, Base> (iter);
false, Iterator, Base> (pos.iterator());
}
template <bool IsMulti, bool IsMap, class Key, class T,
class Duration, class Compare, class Allocator>
template <bool is_const, class Iterator, class Base>
auto
template <bool is_const, class Iterator, class Base, class>
detail::aged_container_iterator <false, Iterator, Base>
aged_ordered_container <IsMulti, IsMap, Key, T, Duration, Compare, Allocator>::
erase (detail::aged_container_iterator <
is_const, Iterator, Base> first,
detail::aged_container_iterator <
is_const, Iterator, Base> const& last) ->
detail::aged_container_iterator <false, Iterator, Base>
erase (detail::aged_container_iterator <is_const, Iterator, Base> first,
detail::aged_container_iterator <is_const, Iterator, Base> last)
{
for (; first != last;)
{
auto p (&*first++);
unlink_and_delete_element (p);
}
unlink_and_delete_element(&*((first++).iterator()));
return detail::aged_container_iterator <
false, Iterator, Base> (first.iterator());
}
@@ -1839,11 +1867,11 @@ operator== (
template <bool IsMulti, bool IsMap, class Key, class T,
class Duration, class Compare, class Allocator>
template <bool is_const, class Iterator, class Base>
template <bool is_const, class Iterator, class Base, class>
void
aged_ordered_container <IsMulti, IsMap, Key, T, Duration, Compare, Allocator>::
touch (detail::aged_container_iterator <
is_const, Iterator, Base> const& pos,
is_const, Iterator, Base> pos,
typename clock_type::time_point const& now)
{
auto& e (*pos.iterator());

View File

@@ -186,7 +186,7 @@ private:
{
return this->member() (extract (e.value));
}
Hash& hash_function()
{
return this->member();
@@ -222,7 +222,7 @@ private:
{
return this->member() (k, extract (e.value));
}
template <class K>
bool operator() (element const& e, K const& k) const
{
@@ -234,7 +234,7 @@ private:
{
return this->member() (k, extract (e.value));
}
bool operator() (element const& e, Key const& k) const
{
return this->member() (extract (e.value), k);
@@ -577,20 +577,36 @@ private:
template <class... Args>
element* new_element (Args&&... args)
{
element* const p (
ElementAllocatorTraits::allocate (m_config.alloc(), 1));
struct Deleter
{
std::reference_wrapper <ElementAllocator> a_;
Deleter (ElementAllocator& a)
: a_(a)
{
}
void
operator()(element* p)
{
ElementAllocatorTraits::deallocate (a_.get(), p, 1);
}
};
std::unique_ptr <element, Deleter> p (ElementAllocatorTraits::allocate (
m_config.alloc(), 1), Deleter(m_config.alloc()));
ElementAllocatorTraits::construct (m_config.alloc(),
p, clock().now(), std::forward <Args> (args)...);
return p;
p.get(), clock().now(), std::forward <Args> (args)...);
return p.release();
}
void delete_element (element* p)
void delete_element (element const* p)
{
ElementAllocatorTraits::destroy (m_config.alloc(), p);
ElementAllocatorTraits::deallocate (m_config.alloc(), p, 1);
ElementAllocatorTraits::deallocate (
m_config.alloc(), const_cast<element*>(p), 1);
}
void unlink_and_delete_element (element* p)
void unlink_and_delete_element (element const* p)
{
chronological.list.erase (
chronological.list.iterator_to (*p));
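The new_element() change above takes ownership of the freshly allocated storage in a unique_ptr with a custom deleter before construct() runs, so a throwing constructor can no longer leak the allocation. A hedged sketch of the same pattern with a plain std::allocator (the function name is illustrative, not part of the diff):

    #include <memory>
    #include <utility>

    template <class Alloc, class... Args>
    typename std::allocator_traits <Alloc>::value_type*
    checked_new (Alloc& a, Args&&... args)
    {
        using traits = std::allocator_traits <Alloc>;
        using T = typename traits::value_type;

        // Own the storage immediately; deallocate if construction throws.
        auto dealloc = [&a](T* p) { traits::deallocate (a, p, 1); };
        std::unique_ptr <T, decltype (dealloc)> guard (
            traits::allocate (a, 1), dealloc);

        traits::construct (a, guard.get(), std::forward <Args> (args)...);
        return guard.release();   // construction succeeded; hand back ownership
    }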
@@ -609,12 +625,14 @@ public:
typedef typename std::allocator_traits <
Allocator>::const_pointer const_pointer;
typedef detail::aged_container_iterator <false,
// A set (that is, !IsMap) iterator is always const because the elements
// of a set are immutable.
typedef detail::aged_container_iterator <!IsMap,
typename cont_type::iterator> iterator;
typedef detail::aged_container_iterator <true,
typename cont_type::iterator> const_iterator;
typedef detail::aged_container_iterator <false,
typedef detail::aged_container_iterator <!IsMap,
typename cont_type::local_iterator> local_iterator;
typedef detail::aged_container_iterator <true,
typename cont_type::local_iterator> const_local_iterator;
@@ -631,11 +649,13 @@ public:
class chronological_t
{
public:
typedef detail::aged_container_iterator <false,
// A set (that is, !IsMap) iterator is always const because the elements
// of a set are immutable.
typedef detail::aged_container_iterator <!IsMap,
typename list_type::iterator> iterator;
typedef detail::aged_container_iterator <true,
typename list_type::iterator> const_iterator;
typedef detail::aged_container_iterator <false,
typedef detail::aged_container_iterator <!IsMap,
typename list_type::reverse_iterator> reverse_iterator;
typedef detail::aged_container_iterator <true,
typename list_type::reverse_iterator> const_reverse_iterator;
@@ -1021,7 +1041,7 @@ public:
template <bool maybe_multi = IsMulti>
typename std::enable_if <maybe_multi,
iterator>::type
insert (const_iterator const& /*hint*/, value_type const& value)
insert (const_iterator /*hint*/, value_type const& value)
{
// VFALCO TODO The hint could be used to let
// the client order equal ranges
@@ -1043,7 +1063,7 @@ public:
template <bool maybe_multi = IsMulti>
typename std::enable_if <maybe_multi,
iterator>::type
insert (const_iterator const& /*hint*/, value_type&& value)
insert (const_iterator /*hint*/, value_type&& value)
{
// VFALCO TODO The hint could be used to let
// the client order equal ranges
@@ -1083,7 +1103,7 @@ public:
}
template <class InputIt>
void insert (InputIt first, InputIt const& last)
void insert (InputIt first, InputIt last)
{
insert (first, last,
typename std::iterator_traits <
@@ -1113,7 +1133,7 @@ public:
// set, map
template <bool maybe_multi = IsMulti, class... Args>
auto
emplace_hint (const_iterator const& /*hint*/, Args&&... args) ->
emplace_hint (const_iterator /*hint*/, Args&&... args) ->
typename std::enable_if <! maybe_multi,
std::pair <iterator, bool>>::type;
@@ -1121,7 +1141,7 @@ public:
template <bool maybe_multi = IsMulti, class... Args>
typename std::enable_if <maybe_multi,
iterator>::type
emplace_hint (const_iterator const& /*hint*/, Args&&... args)
emplace_hint (const_iterator /*hint*/, Args&&... args)
{
// VFALCO TODO The hint could be used for multi, to let
// the client order equal ranges
@@ -1132,14 +1152,14 @@ public:
template <bool is_const, class Iterator, class Base>
detail::aged_container_iterator <false, Iterator, Base>
erase (detail::aged_container_iterator <
is_const, Iterator, Base> const& pos);
is_const, Iterator, Base> pos);
template <bool is_const, class Iterator, class Base>
detail::aged_container_iterator <false, Iterator, Base>
erase (detail::aged_container_iterator <
is_const, Iterator, Base> first,
detail::aged_container_iterator <
is_const, Iterator, Base> const& last);
is_const, Iterator, Base> last);
template <class K>
auto
@@ -1152,7 +1172,7 @@ public:
template <bool is_const, class Iterator, class Base>
void
touch (detail::aged_container_iterator <
is_const, Iterator, Base> const& pos)
is_const, Iterator, Base> pos)
{
touch (pos, clock().now());
}
@@ -1349,24 +1369,11 @@ public:
class OtherAllocator,
bool maybe_multi = IsMulti
>
typename std::enable_if <! maybe_multi,
bool>::type
typename std::enable_if <! maybe_multi, bool>::type
operator== (
aged_unordered_container <false, OtherIsMap,
OtherKey, OtherT, OtherDuration, OtherHash, KeyEqual,
OtherAllocator> const& other) const
{
if (size() != other.size())
return false;
for (auto iter (cbegin()), last (cend()), olast (other.cend());
iter != last; ++iter)
{
auto oiter (other.find (extract (*iter)));
if (oiter == olast)
return false;
}
return true;
}
OtherAllocator> const& other) const;
template <
bool OtherIsMap,
@@ -1377,35 +1384,11 @@ public:
class OtherAllocator,
bool maybe_multi = IsMulti
>
typename std::enable_if <maybe_multi,
bool>::type
typename std::enable_if <maybe_multi, bool>::type
operator== (
aged_unordered_container <true, OtherIsMap,
OtherKey, OtherT, OtherDuration, OtherHash, KeyEqual,
OtherAllocator> const& other) const
{
if (size() != other.size())
return false;
typedef std::pair <const_iterator, const_iterator> EqRng;
for (auto iter (cbegin()), last (cend()); iter != last;)
{
auto const& k (extract (*iter));
auto const eq (equal_range (k));
auto const oeq (other.equal_range (k));
#if BEAST_NO_CXX14_IS_PERMUTATION
if (std::distance (eq.first, eq.second) !=
std::distance (oeq.first, oeq.second) ||
! std::is_permutation (eq.first, eq.second, oeq.first))
return false;
#else
if (! std::is_permutation (eq.first,
eq.second, oeq.first, oeq.second))
return false;
#endif
iter = eq.second;
}
return true;
}
OtherAllocator> const& other) const;
template <
bool OtherIsMulti,
@@ -1456,7 +1439,7 @@ private:
template <class InputIt>
void
insert_unchecked (InputIt first, InputIt const& last)
insert_unchecked (InputIt first, InputIt last)
{
for (; first != last; ++first)
insert_unchecked (*first);
@@ -1464,7 +1447,7 @@ private:
template <class InputIt>
void
insert (InputIt first, InputIt const& last,
insert (InputIt first, InputIt last,
std::input_iterator_tag)
{
for (; first != last; ++first)
@@ -1473,7 +1456,7 @@ private:
template <class InputIt>
void
insert (InputIt first, InputIt const& last,
insert (InputIt first, InputIt last,
std::random_access_iterator_tag)
{
auto const n (std::distance (first, last));
@@ -1484,7 +1467,7 @@ private:
template <bool is_const, class Iterator, class Base>
void
touch (detail::aged_container_iterator <
is_const, Iterator, Base> const& pos,
is_const, Iterator, Base> pos,
typename clock_type::time_point const& now)
{
auto& e (*pos.iterator());
@@ -1811,7 +1794,7 @@ aged_unordered_container <IsMulti, IsMap, Key, T, Duration,
Hash, KeyEqual, Allocator>::
aged_unordered_container (aged_unordered_container&& other)
: m_config (std::move (other.m_config))
, m_buck (m_config.alloc())
, m_buck (std::move (other.m_buck))
, m_cont (std::move (other.m_cont))
{
chronological.list = std::move (other.chronological.list);
@@ -2074,8 +2057,8 @@ operator[] (Key const& key)
std::piecewise_construct,
std::forward_as_tuple (key),
std::forward_as_tuple ()));
chronological.list.push_back (*p);
m_cont.insert_commit (*p, d);
chronological.list.push_back (*p);
return p->value.second;
}
return result.first->value.second;
@@ -2100,8 +2083,8 @@ operator[] (Key&& key)
std::piecewise_construct,
std::forward_as_tuple (std::move (key)),
std::forward_as_tuple ()));
chronological.list.push_back (*p);
m_cont.insert_commit (*p, d);
chronological.list.push_back (*p);
return p->value.second;
}
return result.first->value.second;
@@ -2143,8 +2126,8 @@ insert (value_type const& value) ->
if (result.second)
{
element* const p (new_element (value));
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return std::make_pair (iterator (iter), true);
}
return std::make_pair (iterator (result.first), false);
@@ -2187,8 +2170,8 @@ insert (value_type&& value) ->
if (result.second)
{
element* const p (new_element (std::move (value)));
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return std::make_pair (iterator (iter), true);
}
return std::make_pair (iterator (result.first), false);
@@ -2212,6 +2195,32 @@ insert (value_type&& value) ->
return iterator (iter);
}
#if 1 // Use insert() instead of insert_check() insert_commit()
// set, map
template <bool IsMulti, bool IsMap, class Key, class T,
class Duration, class Hash, class KeyEqual, class Allocator>
template <bool maybe_multi, class... Args>
auto
aged_unordered_container <IsMulti, IsMap, Key, T, Duration,
Hash, KeyEqual, Allocator>::
emplace (Args&&... args) ->
typename std::enable_if <! maybe_multi,
std::pair <iterator, bool>>::type
{
maybe_rehash (1);
// VFALCO NOTE It's unfortunate that we need to
// construct element here
element* const p (new_element (std::forward <Args> (args)...));
auto const result (m_cont.insert (*p));
if (result.second)
{
chronological.list.push_back (*p);
return std::make_pair (iterator (result.first), true);
}
delete_element (p);
return std::make_pair (iterator (result.first), false);
}
#else // As original, use insert_check() / insert_commit () pair.
// set, map
template <bool IsMulti, bool IsMap, class Key, class T,
class Duration, class Hash, class KeyEqual, class Allocator>
@@ -2234,13 +2243,14 @@ emplace (Args&&... args) ->
std::cref (m_config.key_value_equal()), d));
if (result.second)
{
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return std::make_pair (iterator (iter), true);
}
delete_element (p);
return std::make_pair (iterator (result.first), false);
}
#endif // 0
// multiset, multimap
template <bool IsMulti, bool IsMap, class Key, class T,
@@ -2268,7 +2278,7 @@ template <bool maybe_multi, class... Args>
auto
aged_unordered_container <IsMulti, IsMap, Key, T, Duration,
Hash, KeyEqual, Allocator>::
emplace_hint (const_iterator const& /*hint*/, Args&&... args) ->
emplace_hint (const_iterator /*hint*/, Args&&... args) ->
typename std::enable_if <! maybe_multi,
std::pair <iterator, bool>>::type
{
@@ -2283,8 +2293,8 @@ emplace_hint (const_iterator const& /*hint*/, Args&&... args) ->
std::cref (m_config.key_value_equal()), d));
if (result.second)
{
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return std::make_pair (iterator (iter), true);
}
delete_element (p);
@@ -2298,13 +2308,11 @@ detail::aged_container_iterator <false, Iterator, Base>
aged_unordered_container <IsMulti, IsMap, Key, T, Duration,
Hash, KeyEqual, Allocator>::
erase (detail::aged_container_iterator <
is_const, Iterator, Base> const& pos)
is_const, Iterator, Base> pos)
{
auto iter (pos.iterator());
auto p (&*iter++);
unlink_and_delete_element (p);
unlink_and_delete_element(&*((pos++).iterator()));
return detail::aged_container_iterator <
false, Iterator, Base> (iter);
false, Iterator, Base> (pos.iterator());
}
template <bool IsMulti, bool IsMap, class Key, class T,
@@ -2316,14 +2324,11 @@ aged_unordered_container <IsMulti, IsMap, Key, T, Duration,
erase (detail::aged_container_iterator <
is_const, Iterator, Base> first,
detail::aged_container_iterator <
is_const, Iterator, Base> const& last)
is_const, Iterator, Base> last)
{
size_type n (0);
for (; first != last; ++n)
{
auto p (&*first++);
unlink_and_delete_element (p);
}
for (; first != last;)
unlink_and_delete_element(&*((first++).iterator()));
return detail::aged_container_iterator <
false, Iterator, Base> (first.iterator());
}
@@ -2387,6 +2392,79 @@ touch (K const& k) ->
return n;
}
template <bool IsMulti, bool IsMap, class Key, class T,
class Duration, class Hash, class KeyEqual, class Allocator>
template <
bool OtherIsMap,
class OtherKey,
class OtherT,
class OtherDuration,
class OtherHash,
class OtherAllocator,
bool maybe_multi
>
typename std::enable_if <! maybe_multi, bool>::type
aged_unordered_container <
IsMulti, IsMap, Key, T, Duration, Hash, KeyEqual, Allocator>::
operator== (
aged_unordered_container <false, OtherIsMap,
OtherKey, OtherT, OtherDuration, OtherHash, KeyEqual,
OtherAllocator> const& other) const
{
if (size() != other.size())
return false;
for (auto iter (cbegin()), last (cend()), olast (other.cend());
iter != last; ++iter)
{
auto oiter (other.find (extract (*iter)));
if (oiter == olast)
return false;
}
return true;
}
template <bool IsMulti, bool IsMap, class Key, class T,
class Duration, class Hash, class KeyEqual, class Allocator>
template <
bool OtherIsMap,
class OtherKey,
class OtherT,
class OtherDuration,
class OtherHash,
class OtherAllocator,
bool maybe_multi
>
typename std::enable_if <maybe_multi, bool>::type
aged_unordered_container <
IsMulti, IsMap, Key, T, Duration, Hash, KeyEqual, Allocator>::
operator== (
aged_unordered_container <true, OtherIsMap,
OtherKey, OtherT, OtherDuration, OtherHash, KeyEqual,
OtherAllocator> const& other) const
{
if (size() != other.size())
return false;
typedef std::pair <const_iterator, const_iterator> EqRng;
for (auto iter (cbegin()), last (cend()); iter != last;)
{
auto const& k (extract (*iter));
auto const eq (equal_range (k));
auto const oeq (other.equal_range (k));
#if BEAST_NO_CXX14_IS_PERMUTATION
if (std::distance (eq.first, eq.second) !=
std::distance (oeq.first, oeq.second) ||
! std::is_permutation (eq.first, eq.second, oeq.first))
return false;
#else
if (! std::is_permutation (eq.first,
eq.second, oeq.first, oeq.second))
return false;
#endif
iter = eq.second;
}
return true;
}
//------------------------------------------------------------------------------
// map, set
@@ -2407,8 +2485,8 @@ insert_unchecked (value_type const& value) ->
if (result.second)
{
element* const p (new_element (value));
chronological.list.push_back (*p);
auto const iter (m_cont.insert_commit (*p, d));
chronological.list.push_back (*p);
return std::make_pair (iterator (iter), true);
}
return std::make_pair (iterator (result.first), false);

View File

@@ -21,16 +21,17 @@
#define BEAST_CONTAINER_HARDENED_HASH_H_INCLUDED
#include <beast/container/hash_append.h>
#include <beast/cxx14/utility.h> // <utility>
#include <beast/cxx14/type_traits.h> // <type_traits>
#include <beast/utility/noexcept.h>
#include <beast/utility/static_initializer.h>
#include <cstdint>
#include <functional>
#include <mutex>
#include <random>
#include <beast/cxx14/type_traits.h> // <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <beast/cxx14/utility.h> // <utility>
// When set to 1, makes the seed per-process instead
// of per default-constructed instance of hardened_hash
@@ -44,78 +45,86 @@
#endif
namespace beast {
namespace detail {
template <class Result>
class hardened_hash_base
using seed_pair = std::pair<std::uint64_t, std::uint64_t>;
template <bool = true>
seed_pair
get_seed_pair() noexcept
{
public:
typedef Result result_type;
struct state_t
{
std::mutex mutex;
std::random_device rng;
std::mt19937_64 gen {rng()};
std::uniform_int_distribution <std::uint64_t> dist;
private:
state_t() : gen(rng()) {}
// state_t(state_t const&) = delete;
// state_t& operator=(state_t const&) = delete;
};
static static_initializer <state_t> state;
std::lock_guard <std::mutex> lock (state->mutex);
return {state->dist(state->gen), state->dist(state->gen)};
}
template <class HashAlgorithm, bool ProcessSeeded>
class basic_hardened_hash;
/**
* Seed functor once per process
*/
template <class HashAlgorithm>
class basic_hardened_hash<HashAlgorithm, true>
{
static
seed_pair const&
init_seed_pair()
{
static static_initializer <seed_pair, basic_hardened_hash> const
p(get_seed_pair<>());
return *p;
}
public:
using result_type = typename HashAlgorithm::result_type;
template <class T>
result_type
next_seed() noexcept
operator()(T const& t) const noexcept
{
static std::mutex mutex;
static std::random_device rng;
static std::mt19937_64 gen (rng());
std::lock_guard <std::mutex> lock (mutex);
std::uniform_int_distribution <result_type> dist;
result_type value;
for(;;)
{
value = dist (gen);
// VFALCO Do we care if 0 is picked?
if (value != 0)
break;
}
return value;
std::uint64_t seed0;
std::uint64_t seed1;
std::tie(seed0, seed1) = init_seed_pair();
HashAlgorithm h(seed0, seed1);
hash_append(h, t);
return static_cast<result_type>(h);
}
#if BEAST_NO_HARDENED_HASH_INSTANCE_SEED
protected:
hardened_hash_base() noexcept = default;
hardened_hash_base(result_type) noexcept
{
}
result_type
seed() const noexcept
{
static result_type const value (next_seed());
return value;
}
#else
protected:
hardened_hash_base() noexcept
: m_seed (next_seed())
{
}
hardened_hash_base(result_type seed) noexcept
: m_seed (seed)
{
}
result_type
seed() const noexcept
{
return m_seed;
}
private:
// VFALCO Should seed be per process or per hash function?
result_type m_seed;
#endif
};
//------------------------------------------------------------------------------
/**
* Seed functor once per construction
*/
template <class HashAlgorithm>
class basic_hardened_hash<HashAlgorithm, false>
{
seed_pair m_seeds;
public:
using result_type = typename HashAlgorithm::result_type;
} // detail
basic_hardened_hash()
: m_seeds(get_seed_pair<>())
{}
template <class T>
result_type
operator()(T const& t) const noexcept
{
HashAlgorithm h(m_seeds.first, m_seeds.second);
hash_append(h, t);
return static_cast<result_type>(h);
}
};
//------------------------------------------------------------------------------
@@ -140,31 +149,18 @@ private:
}
@endcode
Do not use any version of Murmur or CityHash for the Hasher
template parameter (the hashing algorithm). For details
see https://131002.net/siphash/#at
*/
template <class T, class Hasher = detail::spooky_wrapper>
class hardened_hash
: public detail::hardened_hash_base <std::size_t>
{
typedef detail::hardened_hash_base <std::size_t> base;
public:
typedef T argument_type;
using detail::hardened_hash_base <std::size_t>::result_type;
public:
hardened_hash() = default;
explicit hardened_hash(result_type seed)
: base (seed)
{
}
result_type
operator() (argument_type const& key) const noexcept
{
Hasher h {base::seed()};
hash_append (h, key);
return static_cast<result_type> (h);
}
};
#if BEAST_NO_HARDENED_HASH_INSTANCE_SEED
template <class HashAlgorithm = siphash>
using hardened_hash = basic_hardened_hash<HashAlgorithm, true>;
#else
template <class HashAlgorithm = siphash>
using hardened_hash = basic_hardened_hash<HashAlgorithm, false>;
#endif
} // beast
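After this rewrite, hardened_hash becomes an alias for basic_hardened_hash, seeded once per process or once per constructed instance depending on BEAST_NO_HARDENED_HASH_INSTANCE_SEED. A brief usage sketch (the map itself is illustrative; the header path is assumed to match the include guard above):

    #include <cstdint>
    #include <unordered_map>
    #include <beast/container/hardened_hash.h>   // assumed header path

    // hardened_hash<> (siphash-keyed by default) draws its seed from a random
    // source, so hash values differ between runs/processes and an attacker
    // cannot precompute keys that all land in one bucket.  Any key usable with
    // hash_append works; an integral key is shown because integral types are
    // contiguously hashable.
    std::unordered_map <std::uint64_t, int, beast::hardened_hash<>> table;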

View File

@@ -656,16 +656,38 @@ hash_append (Hasher& h, T0 const& t0, T1 const& t1, T const& ...t) noexcept
hash_append (h, t1, t...);
}
namespace detail
// See http://www.isthe.com/chongo/tech/comp/fnv/
class fnv1a
{
std::uint64_t state_ = 14695981039346656037ULL;
public:
class spooky_wrapper
using result_type = std::size_t;
void
append (void const* key, std::size_t len) noexcept
{
unsigned char const* p = static_cast<unsigned char const*>(key);
unsigned char const* const e = p + len;
for (; p < e; ++p)
state_ = (state_ ^ *p) * 1099511628211ULL;
}
explicit
operator std::size_t() noexcept
{
return static_cast<std::size_t>(state_);
}
};
// See http://burtleburtle.net/bob/hash/spooky.html
class spooky
{
SpookyHash state_;
public:
using result_type = std::size_t;
spooky_wrapper (std::size_t seed1 = 1, std::size_t seed2 = 2) noexcept
spooky (std::size_t seed1 = 1, std::size_t seed2 = 2) noexcept
{
state_.Init (seed1, seed2);
}
@@ -685,9 +707,30 @@ public:
}
};
} // detail
// See https://131002.net/siphash/
class siphash
{
std::uint64_t v0_ = 0x736f6d6570736575ULL;
std::uint64_t v1_ = 0x646f72616e646f6dULL;
std::uint64_t v2_ = 0x6c7967656e657261ULL;
std::uint64_t v3_ = 0x7465646279746573ULL;
unsigned char buf_[8];
unsigned bufsize_ = 0;
unsigned total_length_ = 0;
public:
using result_type = std::size_t;
template <class Hasher = detail::spooky_wrapper>
siphash() = default;
explicit siphash(std::uint64_t k0, std::uint64_t k1 = 0) noexcept;
void
append (void const* key, std::size_t len) noexcept;
explicit
operator std::size_t() noexcept;
};
template <class Hasher = spooky>
struct uhash
{
using result_type = typename Hasher::result_type;
@@ -702,6 +745,7 @@ struct uhash
}
};
} // beast
#endif
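The hashers declared above (fnv1a, spooky, siphash) share one interface: append (void const*, std::size_t) plus an explicit conversion to the result type, and uhash<> adapts any of them into a std::hash-style functor by routing the argument through hash_append. A hedged sketch of the intended usage pattern (the point type and its members are illustrative):

    #include <cstdint>
    #include <unordered_set>
    #include <beast/container/hash_append.h>   // uhash, spooky, fnv1a, siphash

    struct point
    {
        std::int32_t x;
        std::int32_t y;

        friend bool operator== (point const& a, point const& b) noexcept
        {
            return a.x == b.x && a.y == b.y;
        }

        // Expose the salient state once; any Hasher with append()/explicit
        // conversion (fnv1a, spooky, siphash) then sees the same byte stream.
        template <class Hasher>
        friend void hash_append (Hasher& h, point const& p) noexcept
        {
            using beast::hash_append;
            hash_append (h, p.x, p.y);
        }
    };

    std::unordered_set <point, beast::uhash<>> seen;   // spooky is the default Hasher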

View File

@@ -0,0 +1,166 @@
//------------------------------- siphash.h ------------------------------------
//
// This software is in the public domain. The only restriction on its use is
// that no one can remove it from the public domain by claiming ownership of it,
// including the original authors.
//
// There is no warranty of correctness on the software contained herein. Use
// at your own risk.
//
// Derived from:
//
// SipHash reference C implementation
//
// Written in 2012 by Jean-Philippe Aumasson <jeanphilippe.aumasson@gmail.com>
// Daniel J. Bernstein <djb@cr.yp.to>
//
// To the extent possible under law, the author(s) have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication along
// with this software. If not, see
// <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//------------------------------------------------------------------------------
#include <beast/container/hash_append.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
// namespace acme is used to demonstrate example code. It is not proposed.
namespace beast
{
namespace
{
typedef std::uint64_t u64;
typedef std::uint32_t u32;
typedef std::uint8_t u8;
inline
u64
rotl(u64 x, u64 b)
{
return (x << b) | (x >> (64 - b));
}
inline
u64
u8to64_le(const u8* p)
{
#if BEAST_LITTLE_ENDIAN
return *static_cast<u64 const*>(static_cast<void const*>(p));
#else
return static_cast<u64>(p[7]) << 56 | static_cast<u64>(p[6]) << 48 |
static_cast<u64>(p[5]) << 40 | static_cast<u64>(p[4]) << 32 |
static_cast<u64>(p[3]) << 24 | static_cast<u64>(p[2]) << 16 |
static_cast<u64>(p[1]) << 8 | static_cast<u64>(p[0]);
#endif
}
inline
void
sipround(u64& v0, u64& v1, u64& v2, u64& v3)
{
v0 += v1;
v1 = rotl(v1, 13);
v1 ^= v0;
v0 = rotl(v0, 32);
v2 += v3;
v3 = rotl(v3, 16);
v3 ^= v2;
v0 += v3;
v3 = rotl(v3, 21);
v3 ^= v0;
v2 += v1;
v1 = rotl(v1, 17);
v1 ^= v2;
v2 = rotl(v2, 32);
}
} // unnamed
siphash::siphash(std::uint64_t k0, std::uint64_t k1) noexcept
{
v3_ ^= k1;
v2_ ^= k0;
v1_ ^= k1;
v0_ ^= k0;
}
void
siphash::append (void const* key, std::size_t inlen) noexcept
{
u8 const* in = static_cast<const u8*>(key);
total_length_ += inlen;
if (bufsize_ + inlen < 8)
{
std::copy(in, in+inlen, buf_ + bufsize_);
bufsize_ += inlen;
return;
}
if (bufsize_ > 0)
{
auto t = 8 - bufsize_;
std::copy(in, in+t, buf_ + bufsize_);
u64 m = u8to64_le( buf_ );
v3_ ^= m;
sipround(v0_, v1_, v2_, v3_);
sipround(v0_, v1_, v2_, v3_);
v0_ ^= m;
in += t;
inlen -= t;
}
bufsize_ = inlen & 7;
u8 const* const end = in + (inlen - bufsize_);
for ( ; in != end; in += 8 )
{
u64 m = u8to64_le( in );
v3_ ^= m;
sipround(v0_, v1_, v2_, v3_);
sipround(v0_, v1_, v2_, v3_);
v0_ ^= m;
}
std::copy(end, end + bufsize_, buf_);
}
siphash::operator std::size_t() noexcept
{
std::size_t b = static_cast<u64>(total_length_) << 56;
switch(bufsize_)
{
case 7:
b |= static_cast<u64>(buf_[6]) << 48;
case 6:
b |= static_cast<u64>(buf_[5]) << 40;
case 5:
b |= static_cast<u64>(buf_[4]) << 32;
case 4:
b |= static_cast<u64>(buf_[3]) << 24;
case 3:
b |= static_cast<u64>(buf_[2]) << 16;
case 2:
b |= static_cast<u64>(buf_[1]) << 8;
case 1:
b |= static_cast<u64>(buf_[0]);
case 0:
break;
}
v3_ ^= b;
sipround(v0_, v1_, v2_, v3_);
sipround(v0_, v1_, v2_, v3_);
v0_ ^= b;
v2_ ^= 0xff;
sipround(v0_, v1_, v2_, v3_);
sipround(v0_, v1_, v2_, v3_);
sipround(v0_, v1_, v2_, v3_);
sipround(v0_, v1_, v2_, v3_);
b = v0_ ^ v1_ ^ v2_ ^ v3_;
return b;
}
} // beast
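For completeness, a small usage sketch of the streaming interface defined above: construct with the two 64-bit key words, feed bytes with append(), then convert to std::size_t to finalize (the digest() wrapper is illustrative only).

    #include <cstddef>
    #include <cstdint>
    #include <beast/container/hash_append.h>   // declares beast::siphash

    std::size_t
    digest (void const* data, std::size_t len,
        std::uint64_t k0, std::uint64_t k1) noexcept
    {
        beast::siphash h (k0, k1);              // keyed construction
        h.append (data, len);                   // may be called repeatedly
        return static_cast <std::size_t> (h);   // finalization happens here
    }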

View File

@@ -166,7 +166,7 @@ void SpookyHash::Hash128(
while (u.p64 < end)
{
Mix(u.p64, h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
u.p64 += sc_numVars;
u.p64 += sc_numVars;
}
}
else
@@ -175,7 +175,7 @@ void SpookyHash::Hash128(
{
memcpy(buf, u.p64, sc_blockSize);
Mix(buf, h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
u.p64 += sc_numVars;
u.p64 += sc_numVars;
}
}
@@ -275,7 +275,7 @@ void SpookyHash::Update(const void *message, size_t length)
while (u.p64 < end)
{
Mix(u.p64, h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
u.p64 += sc_numVars;
u.p64 += sc_numVars;
}
}
else
@@ -284,7 +284,7 @@ void SpookyHash::Update(const void *message, size_t length)
{
memcpy(m_data, u.p8, sc_blockSize);
Mix(m_data, h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
u.p64 += sc_numVars;
u.p64 += sc_numVars;
}
}

View File

@@ -147,7 +147,7 @@ public:
return static_cast <T*> (
::operator new (n * sizeof(T)));
}
void deallocate (T* p, std::size_t)
{
::operator delete (p);
@@ -482,6 +482,23 @@ public:
//--------------------------------------------------------------------------
template <bool IsUnordered, bool IsMulti, bool IsMap>
void
testIterator ();
// Unordered containers don't have reverse iterators
template <bool IsUnordered, bool IsMulti, bool IsMap>
typename std::enable_if <! IsUnordered>::type
testReverseIterator();
template <bool IsUnordered, bool IsMulti, bool IsMap>
typename std::enable_if <IsUnordered>::type
testReverseIterator()
{
}
//--------------------------------------------------------------------------
template <class Container, class Values>
void checkInsertCopy (Container& c, Values const& v);
@@ -524,6 +541,31 @@ public:
//--------------------------------------------------------------------------
// Helpers for erase tests
template <class Container, class Values>
void reverseFillAgedContainer(Container& c, Values const& v);
template <class Iter>
Iter nextToEndIter (Iter const beginIter, Iter const endItr);
//--------------------------------------------------------------------------
template <class Container, class Iter>
bool doElementErase (Container& c, Iter const beginItr, Iter const endItr);
template <bool IsUnordered, bool IsMulti, bool IsMap>
void testElementErase();
//--------------------------------------------------------------------------
template <class Container, class BeginEndSrc>
void doRangeErase (Container& c, BeginEndSrc const& beginEndSrc);
template <bool IsUnordered, bool IsMulti, bool IsMap>
void testRangeErase();
//--------------------------------------------------------------------------
// ordered
template <bool IsUnordered, bool IsMulti, bool IsMap>
typename std::enable_if <! IsUnordered>::type
@@ -1083,6 +1125,163 @@ testCopyMove ()
}
}
//------------------------------------------------------------------------------
//
// Iterator construction and assignment
//
//------------------------------------------------------------------------------
template <bool IsUnordered, bool IsMulti, bool IsMap>
void
aged_associative_container_test_base::
testIterator()
{
typedef TestTraits <IsUnordered, IsMulti, IsMap> Traits;
typedef typename Traits::Value Value;
typedef typename Traits::Alloc Alloc;
typename Traits::Clock clock;
auto const v (Traits::values());
//testcase (Traits::name() + " iterators");
testcase ("iterator");
typename Traits::template Cont <> c {clock};
using iterator = decltype (c.begin());
using const_iterator = decltype (c.cbegin());
// Should be able to construct or assign an iterator from an iterator.
iterator nnIt_0 {c.begin()};
iterator nnIt_1 {nnIt_0};
expect (nnIt_0 == nnIt_1, "iterator constructor failed");
iterator nnIt_2;
nnIt_2 = nnIt_1;
expect (nnIt_1 == nnIt_2, "iterator assignment failed");
// Should be able to construct or assign a const_iterator from a
// const_iterator.
const_iterator ccIt_0 {c.cbegin()};
const_iterator ccIt_1 {ccIt_0};
expect (ccIt_0 == ccIt_1, "const_iterator constructor failed");
const_iterator ccIt_2;
ccIt_2 = ccIt_1;
expect (ccIt_1 == ccIt_2, "const_iterator assignment failed");
// Comparison between iterator and const_iterator is okay
expect (nnIt_0 == ccIt_0,
"Comparing an iterator to a const_iterator failed");
expect (ccIt_1 == nnIt_1,
"Comparing a const_iterator to an iterator failed");
// Should be able to construct a const_iterator from an iterator.
const_iterator ncIt_3 {c.begin()};
const_iterator ncIt_4 {nnIt_0};
expect (ncIt_3 == ncIt_4,
"const_iterator construction from iterator failed");
const_iterator ncIt_5;
ncIt_5 = nnIt_2;
expect (ncIt_5 == ncIt_4,
"const_iterator assignment from iterator failed");
// None of these should compile because they construct or assign to a
// non-const iterator with a const_iterator.
// iterator cnIt_0 {c.cbegin()};
// iterator cnIt_1 {ccIt_0};
// iterator cnIt_2;
// cnIt_2 = ccIt_2;
}
template <bool IsUnordered, bool IsMulti, bool IsMap>
typename std::enable_if <! IsUnordered>::type
aged_associative_container_test_base::
testReverseIterator()
{
typedef TestTraits <IsUnordered, IsMulti, IsMap> Traits;
typedef typename Traits::Value Value;
typedef typename Traits::Alloc Alloc;
typename Traits::Clock clock;
auto const v (Traits::values());
//testcase (Traits::name() + " reverse_iterators");
testcase ("reverse_iterator");
typename Traits::template Cont <> c {clock};
using iterator = decltype (c.begin());
using const_iterator = decltype (c.cbegin());
using reverse_iterator = decltype (c.rbegin());
using const_reverse_iterator = decltype (c.crbegin());
// Naming decoder ring
// constructed from ------+ +----- constructed type
// /\/\ -- character pairs
// xAyBit
// r (reverse) or f (forward)--^-^
// ^-^------ C (const) or N (non-const)
// Should be able to construct or assign a reverse_iterator from a
// reverse_iterator.
reverse_iterator rNrNit_0 {c.rbegin()};
reverse_iterator rNrNit_1 {rNrNit_0};
expect (rNrNit_0 == rNrNit_1, "reverse_iterator constructor failed");
reverse_iterator xXrNit_2;
xXrNit_2 = rNrNit_1;
expect (rNrNit_1 == xXrNit_2, "reverse_iterator assignment failed");
// Should be able to construct or assign a const_reverse_iterator from a
// const_reverse_iterator
const_reverse_iterator rCrCit_0 {c.crbegin()};
const_reverse_iterator rCrCit_1 {rCrCit_0};
expect (rCrCit_0 == rCrCit_1, "reverse_iterator constructor failed");
const_reverse_iterator xXrCit_2;
xXrCit_2 = rCrCit_1;
expect (rCrCit_1 == xXrCit_2, "reverse_iterator assignment failed");
// Comparison between reverse_iterator and const_reverse_iterator is okay
expect (rNrNit_0 == rCrCit_0,
"Comparing an iterator to a const_iterator failed");
expect (rCrCit_1 == rNrNit_1,
"Comparing a const_iterator to an iterator failed");
// Should be able to construct or assign a const_reverse_iterator from a
// reverse_iterator
const_reverse_iterator rNrCit_0 {c.rbegin()};
const_reverse_iterator rNrCit_1 {rNrNit_0};
expect (rNrCit_0 == rNrCit_1,
"const_reverse_iterator construction from reverse_iterator failed");
xXrCit_2 = rNrNit_1;
expect (rNrCit_1 == xXrCit_2,
"const_reverse_iterator assignment from reverse_iterator failed");
// The standard allows these conversions:
// o reverse_iterator is explicitly constructible from iterator.
// o const_reverse_iterator is explicitly constructible from const_iterator.
// Should be able to construct or assign reverse_iterators from
// non-reverse iterators.
reverse_iterator fNrNit_0 {c.begin()};
const_reverse_iterator fNrCit_0 {c.begin()};
expect (fNrNit_0 == fNrCit_0,
"reverse_iterator construction from iterator failed");
const_reverse_iterator fCrCit_0 {c.cbegin()};
expect (fNrCit_0 == fCrCit_0,
"const_reverse_iterator construction from const_iterator failed");
// None of these should compile because they construct a non-reverse
// iterator from a reverse_iterator.
// iterator rNfNit_0 {c.rbegin()};
// const_iterator rNfCit_0 {c.rbegin()};
// const_iterator rCfCit_0 {c.crbegin()};
// You should not be able to assign an iterator to a reverse_iterator or
// vice versa. So the following lines should not compile.
iterator xXfNit_0;
// xXfNit_0 = xXrNit_2;
// xXrNit_2 = xXfNit_0;
}
//------------------------------------------------------------------------------
//
// Modifiers
@@ -1232,9 +1431,12 @@ testChronological ()
c.chronological.cbegin(), c.chronological.cend(),
v.begin(), v.end(), equal_value <Traits> ()));
for (auto iter (v.rbegin()); iter != v.rend(); ++iter)
// Test touch() with a non-const iterator.
for (auto iter (v.crbegin()); iter != v.crend(); ++iter)
{
auto found (c.find (Traits::extract (*iter)));
using iterator = typename decltype (c)::iterator;
iterator found (c.find (Traits::extract (*iter)));
expect (found != c.cend());
if (found == c.cend())
return;
@@ -1243,7 +1445,30 @@ testChronological ()
expect (std::equal (
c.chronological.cbegin(), c.chronological.cend(),
v.rbegin(), v.rend(), equal_value <Traits> ()));
v.crbegin(), v.crend(), equal_value <Traits> ()));
// Test touch() with a const_iterator
for (auto iter (v.cbegin()); iter != v.cend(); ++iter)
{
using const_iterator = typename decltype (c)::const_iterator;
const_iterator found (c.find (Traits::extract (*iter)));
expect (found != c.cend());
if (found == c.cend())
return;
c.touch (found);
}
expect (std::equal (
c.chronological.cbegin(), c.chronological.cend(),
v.cbegin(), v.cend(), equal_value <Traits> ()));
{
// Because touch (reverse_iterator pos) is not allowed, the following
// lines should not compile for any aged_container type.
// c.touch (c.rbegin());
// c.touch (c.crbegin());
}
}
//------------------------------------------------------------------------------
@@ -1282,6 +1507,270 @@ testArrayCreate()
}
}
//------------------------------------------------------------------------------
//
// Helpers for erase tests
//
//------------------------------------------------------------------------------
template <class Container, class Values>
void
aged_associative_container_test_base::
reverseFillAgedContainer (Container& c, Values const& values)
{
// Just in case the passed in container was not empty.
c.clear();
// c.clock() returns an abstract_clock, so dynamic_cast to manual_clock.
typedef TestTraitsBase::Clock Clock;
Clock& clk (dynamic_cast <Clock&> (c.clock ()));
clk.set (0);
Values rev (values);
std::sort (rev.begin (), rev.end ());
std::reverse (rev.begin (), rev.end ());
for (auto& v : rev)
{
// Add values in reverse order so they are reversed chronologically.
++clk;
c.insert (v);
}
}
// Get one iterator before endIter. We have to use operator++ because you
// cannot use operator-- with unordered container iterators.
template <class Iter>
Iter
aged_associative_container_test_base::
nextToEndIter (Iter beginIter, Iter const endIter)
{
if (beginIter == endIter)
{
fail ("Internal test failure. Cannot advance beginIter");
return beginIter;
}
//
Iter nextToEnd = beginIter;
do
{
nextToEnd = beginIter++;
} while (beginIter != endIter);
return nextToEnd;
}
// Implementation for the element erase tests
//
// This test accepts:
// o the container from which we will erase elements
// o iterators into that container defining the range of the erase
//
// This implementation does not declare a pass, since it wants to allow
// the caller to examine the size of the container and the returned iterator
//
// Note that this test works on the aged_associative containers because an
// erase only invalidates references and iterators to the erased element
// (see 23.2.4/13). Therefore the passed-in end iterator stays valid through
// the whole test.
template <class Container, class Iter>
bool aged_associative_container_test_base::
doElementErase (Container& c, Iter const beginItr, Iter const endItr)
{
auto it (beginItr);
size_t count = c.size();
while (it != endItr)
{
auto expectIt = it;
++expectIt;
it = c.erase (it);
if (it != expectIt)
{
fail ("Unexpected returned iterator from element erase");
return false;
}
--count;
if (count != c.size())
{
fail ("Failed to erase element");
return false;
}
if (c.empty ())
{
if (it != endItr)
{
fail ("Erase of last element didn't produce end");
return false;
}
}
}
return true;
}
//------------------------------------------------------------------------------
//
// Erase of individual elements
//
//------------------------------------------------------------------------------
template <bool IsUnordered, bool IsMulti, bool IsMap>
void
aged_associative_container_test_base::
testElementErase ()
{
typedef TestTraits <IsUnordered, IsMulti, IsMap> Traits;
//testcase (Traits::name() + " element erase"
testcase ("element erase");
// Make and fill the container
typename Traits::Clock ck;
typename Traits::template Cont <> c {ck};
reverseFillAgedContainer (c, Traits::values());
{
// Test standard iterators
auto tempContainer (c);
if (! doElementErase (tempContainer,
tempContainer.cbegin(), tempContainer.cend()))
return; // Test failed
expect (tempContainer.empty(), "Failed to erase all elements");
pass();
}
{
// Test chronological iterators
auto tempContainer (c);
auto& chron (tempContainer.chronological);
if (! doElementErase (tempContainer, chron.begin(), chron.end()))
return; // Test failed
expect (tempContainer.empty(),
"Failed to chronologically erase all elements");
pass();
}
{
// Test standard iterator partial erase
auto tempContainer (c);
expect (tempContainer.size() > 2,
"Internal failure. Container too small.");
if (! doElementErase (tempContainer, ++tempContainer.begin(),
nextToEndIter (tempContainer.begin(), tempContainer.end())))
return; // Test failed
expect (tempContainer.size() == 2,
"Failed to erase expected number of elements");
pass();
}
{
// Test chronological iterator partial erase
auto tempContainer (c);
expect (tempContainer.size() > 2,
"Internal failure. Container too small.");
auto& chron (tempContainer.chronological);
if (! doElementErase (tempContainer, ++chron.begin(),
nextToEndIter (chron.begin(), chron.end())))
return; // Test failed
expect (tempContainer.size() == 2,
"Failed to chronologically erase expected number of elements");
pass();
}
{
auto tempContainer (c);
expect (tempContainer.size() > 4,
"Internal failure. Container too small.");
// erase(reverse_iterator) is not allowed. None of the following
// should compile for any aged_container type.
// c.erase (c.rbegin());
// c.erase (c.crbegin());
// c.erase(c.rbegin(), ++c.rbegin());
// c.erase(c.crbegin(), ++c.crbegin());
}
}
// Implementation for the range erase tests
//
// This test accepts:
//
// o A container with more than 2 elements and
// o An object to ask for begin() and end() iterators in the passed container
//
// This peculiar interface allows either the container itself to be passed as
// the second argument or the container's "chronological" element. Both
// sources of iterators need to be tested on the container.
//
// The test locates iterators such that a range-based delete leaves the first
// and last elements in the container. It then validates that the container
// ended up with the expected contents.
//
template <class Container, class BeginEndSrc>
void
aged_associative_container_test_base::
doRangeErase (Container& c, BeginEndSrc const& beginEndSrc)
{
expect (c.size () > 2,
"Internal test failure. Container must have more than 2 elements");
auto itBeginPlusOne (beginEndSrc.begin ());
auto const valueFront = *itBeginPlusOne;
++itBeginPlusOne;
// Get one iterator before end()
auto itBack (nextToEndIter (itBeginPlusOne, beginEndSrc.end ()));
auto const valueBack = *itBack;
// Erase all elements but first and last
auto const retIter = c.erase (itBeginPlusOne, itBack);
expect (c.size() == 2,
"Unexpected size for range-erased container");
expect (valueFront == *(beginEndSrc.begin()),
"Unexpected first element in range-erased container");
expect (valueBack == *(++beginEndSrc.begin()),
"Unexpected last element in range-erased container");
expect (retIter == (++beginEndSrc.begin()),
"Unexpected return iterator from erase");
pass ();
}
//------------------------------------------------------------------------------
//
// Erase range of elements
//
//------------------------------------------------------------------------------
template <bool IsUnordered, bool IsMulti, bool IsMap>
void
aged_associative_container_test_base::
testRangeErase ()
{
typedef TestTraits <IsUnordered, IsMulti, IsMap> Traits;
//testcase (Traits::name() + " element erase"
testcase ("range erase");
// Make and fill the container
typename Traits::Clock ck;
typename Traits::template Cont <> c {ck};
reverseFillAgedContainer (c, Traits::values());
// Not bothering to test range erase with reverse iterators.
{
auto tempContainer (c);
doRangeErase (tempContainer, tempContainer);
}
{
auto tempContainer (c);
doRangeErase (tempContainer, tempContainer.chronological);
}
}
//------------------------------------------------------------------------------
//
// Container-wide comparison
@@ -1378,9 +1867,13 @@ testMaybeUnorderedMultiMap ()
testConstructRange <IsUnordered, IsMulti, IsMap> ();
testConstructInitList <IsUnordered, IsMulti, IsMap> ();
testCopyMove <IsUnordered, IsMulti, IsMap> ();
testIterator <IsUnordered, IsMulti, IsMap> ();
testReverseIterator <IsUnordered, IsMulti, IsMap> ();
testModifiers <IsUnordered, IsMulti, IsMap> ();
testChronological <IsUnordered, IsMulti, IsMap> ();
testArrayCreate <IsUnordered, IsMulti, IsMap> ();
testElementErase <IsUnordered, IsMulti, IsMap> ();
testRangeErase <IsUnordered, IsMulti, IsMap> ();
testCompare <IsUnordered, IsMulti, IsMap> ();
testObservers <IsUnordered, IsMulti, IsMap> ();
}
@@ -1514,4 +2007,4 @@ BEAST_DEFINE_TESTSUITE(aged_unordered_map,container,beast);
BEAST_DEFINE_TESTSUITE(aged_unordered_multiset,container,beast);
BEAST_DEFINE_TESTSUITE(aged_unordered_multimap,container,beast);
}
} // namespace beast
