Merge branch 'master' of github.com:jedmccaleb/NewCoin

jed
2012-10-15 13:49:53 -07:00
38 changed files with 1436 additions and 848 deletions

View File

@@ -9,6 +9,7 @@
// Node
var util = require('util');
var events = require('events');
// npm
var WebSocket = require('ws');
@@ -16,6 +17,81 @@ var WebSocket = require('ws');
var amount = require('./amount.js');
var Amount = amount.Amount;
// Events emitted:
// 'success'
// 'error'
// 'remoteError'
// 'remoteUnexpected'
// 'remoteDisconnected'
var Request = function (remote, command) {
this.message = {
'command' : command,
'id' : undefined,
};
this.remote = remote;
this.on('request', this.request_default);
};
Request.prototype = new events.EventEmitter;
// Return this. Node's EventEmitter on() doesn't return this.
Request.prototype.on = function (e, c) {
events.EventEmitter.prototype.on.call(this, e, c);
return this;
};
// Send the request to a remote.
Request.prototype.request = function (remote) {
this.emit('request', remote);
};
Request.prototype.request_default = function () {
this.remote.request(this);
};
// Set the ledger for a request.
// - ledger_entry
Request.prototype.ledger = function (ledger) {
this.message.ledger = ledger;
return this;
};
// Set the ledger_index for a request.
// - ledger_entry
Request.prototype.ledger_index = function (ledger_index) {
this.message.ledger_index = ledger_index;
return this;
};
Request.prototype.account_root = function (account) {
this.message.account_root = account;
return this;
};
Request.prototype.index = function (hash) {
this.message.index = hash;
return this;
};
Request.prototype.secret = function (s) {
if (s)
this.message.secret = s;
return this;
};
Request.prototype.transaction = function (t) {
this.message.transaction = t;
return this;
};
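// Usage sketch (illustrative only, not part of this commit; assumes a
// connected, trusted Remote in 'remote'):
//
// remote.request_ledger_closed()          // Build the request.
//   .on('success', function (message) {   // Register for events; 'on' returns this.
//     console.log("closed: %s", JSON.stringify(message));
//   })
//   .on('error', function (error) {
//     console.log("error: %s", JSON.stringify(error));
//   })
//   .request();                           // Send it to the remote.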
// --> trusted: truthy, if remote is trusted
var Remote = function (trusted, websocket_ip, websocket_port, config, trace) {
this.trusted = trusted;
@@ -44,20 +120,25 @@ var Remote = function (trusted, websocket_ip, websocket_port, config, trace) {
};
};
Remote.prototype = new events.EventEmitter;
var remoteConfig = function (config, server, trace) {
var serverConfig = config.servers[server];
return new Remote(serverConfig.trusted, serverConfig.websocket_ip, serverConfig.websocket_port, config, trace);
};
var flags = {
// OfferCreate flags:
'tfPassive' : 0x00010000,
'OfferCreate' : {
'Passive' : 0x00010000,
},
// Payment flags:
'tfCreateAccount' : 0x00010000,
'tfPartialPayment' : 0x00020000,
'tfLimitQuality' : 0x00040000,
'tfNoRippleDirect' : 0x00080000,
'Payment' : {
'CreateAccount' : 0x00010000,
'PartialPayment' : 0x00020000,
'LimitQuality' : 0x00040000,
'NoRippleDirect' : 0x00080000,
},
};
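// Usage sketch (illustrative only, not part of this commit): the
// Transaction.prototype.flags() helper defined below looks flag names up in
// this table by TransactionType, so a hypothetical Payment transaction 't'
// can set flags by name:
//
// t.flags('PartialPayment');                        // a single flag
// t.flags([ 'PartialPayment', 'LimitQuality' ]);    // or a list of flags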
// XXX This needs to be determined from the network.
@@ -78,78 +159,152 @@ Remote.prototype.connect_helper = function () {
ws.response = {};
ws.onopen = function () {
if (this.trace) console.log("remote: onopen: %s", ws.readyState);
if (self.trace) console.log("remote: onopen: %s", ws.readyState);
ws.onclose = undefined;
ws.onerror = undefined;
clearTimeout(self.connect_timer); delete self.connect_timer;
clearTimeout(self.retry_timer); delete self.retry_timer;
self.done(ws.readyState);
};
ws.onerror = function () {
if (this.trace) console.log("remote: onerror: %s", ws.readyState);
if (self.trace) console.log("remote: onerror: %s", ws.readyState);
ws.onclose = undefined;
if (self.expire) {
if (this.trace) console.log("remote: was expired");
if (self.trace) console.log("remote: was expired");
ws.onerror = undefined;
self.done(ws.readyState);
} else {
// Delay and retry.
setTimeout(function () {
if (this.trace) console.log("remote: retry");
self.connect_helper();
}, 50); // Retry rate 50ms.
clearTimeout(self.retry_timer);
self.retry_timer = setTimeout(function () {
if (self.trace) console.log("remote: retry");
self.connect_helper();
}, 50); // Retry rate 50ms.
}
};
// Covers failure to open.
ws.onclose = function () {
if (this.trace) console.log("remote: onclose: %s", ws.readyState);
if (self.trace) console.log("remote: onclose: %s", ws.readyState);
ws.onerror = undefined;
clearTimeout(self.retry_timer);
delete self.retry_timer;
self.done(ws.readyState);
};
// Node's ws module doesn't pass arguments to onmessage.
ws.on('message', function (json, flags) {
var message = JSON.parse(json);
// console.log("message: %s", json);
if (message.type !== 'response') {
console.log("unexpected message: %s", json);
var message = JSON.parse(json);
var unexpected = false;
var request;
} else {
var done = ws.response[message.id];
if (done) {
done(message);
} else {
console.log("unexpected message id: %s", json);
if ('object' !== typeof message) {
unexpected = true;
}
else {
switch (message.type) {
case 'response':
{
request = ws.response[message.id];
if (!request) {
unexpected = true;
}
else if ('success' === message.result) {
if (self.trace) console.log("message: %s", json);
request.emit('success', message);
}
else if (message.error) {
if (self.trace) console.log("message: %s", json);
request.emit('error', {
'error' : 'remoteError',
'error_message' : 'Remote reported an error.',
'remote' : message,
});
}
}
case 'ledgerClosed':
// XXX If not trusted, need to verify we consider ledger closed.
// XXX Also need to consider a slow server or out of order response.
// XXX Be more defensive: fields could be missing or of the wrong type.
// YYY Might want to do some cache management.
self.ledger_closed = message.ledger_closed;
self.ledger_current_index = message.ledger_closed_index + 1;
self.emit('ledger_closed');
break;
default:
unexpected = true;
break;
}
}
if (!unexpected) {
}
// Unexpected response from remote.
// XXX This isn't so robust. Hard fails should probably only happen in a debugging scenario.
else if (self.trusted) {
// Remote is trusted, report an error.
console.log("unexpected message from trusted remote: %s", json);
(request || self).emit('error', {
'error' : 'remoteUnexpected',
'error_message' : 'Unexpected response from remote.'
});
}
else {
// Treat as a disconnect.
if (self.trace) console.log("unexpected message from untrusted remote: %s", json);
// XXX All pending requests need this treatment and need to actually disconnect.
(request || self).emit('error', {
'error' : 'remoteDisconnected',
'error_message' : 'Remote disconnected.'
});
}
});
};
// Target state is connected.
// XXX Get rid of 'done'; use the event model.
// done(readyState):
// --> readyState: OPEN, CLOSED
Remote.prototype.connect = function (done, timeout) {
var self = this;
this.url = util.format("ws://%s:%s", this.websocket_ip, this.websocket_port);
this.url = util.format("ws://%s:%s", this.websocket_ip, this.websocket_port);
this.done = done;
if (timeout) {
if (this.trace) console.log("remote: expire: false");
this.expire = false;
setTimeout(function () {
if (self.trace) console.log("remote: expire: timeout");
self.expire = true;
}, timeout);
this.expire = false;
this.connect_timer = setTimeout(function () {
if (self.trace) console.log("remote: expire: timeout");
delete self.connect_timer;
self.expire = true;
}, timeout);
} else {
if (this.trace) console.log("remote: expire: false");
this.expire = true;
@@ -159,137 +314,156 @@ Remote.prototype.connect = function (done, timeout) {
};
// Target state is disconnected.
// Note: if exiting or other side is going away, don't need to disconnect.
Remote.prototype.disconnect = function (done) {
var self = this;
var ws = this.ws;
if (self.trace) console.log("remote: disconnect");
ws.onclose = function () {
if (self.trace) console.log("remote: onclose: %s", ws.readyState);
done(ws.readyState);
};
// The ws package has a hard-coded 30 second timeout.
ws.close();
};
// Send a request. The request should lack the id.
// Send a request.
// <-> request: what to send, consumed.
Remote.prototype.request = function (request, onDone, onFailure) {
Remote.prototype.request = function (request) {
var self = this;
this.ws.response[request.message.id = this.id] = request;
this.id += 1; // Advance id.
request.id = this.id;
if (this.trace) console.log("remote: request: %s", JSON.stringify(request.message));
this.ws.response[request.id] = function (response) {
if (self.trace) console.log("remote: response: %s", JSON.stringify(response));
if (onFailure && response.error)
{
onFailure(response);
}
else
{
onDone(response);
}
};
if (this.trace) console.log("remote: request: %s", JSON.stringify(request));
this.ws.send(JSON.stringify(request));
this.ws.send(JSON.stringify(request.message));
};
Remote.prototype.request_ledger_closed = function (onDone, onFailure) {
Remote.prototype.request_ledger_closed = function () {
assert(this.trusted); // If not trusted, need to check proof.
this.request({ 'command' : 'ledger_closed' }, onDone, onFailure);
return new Request(this, 'ledger_closed');
};
// Get the current proposed ledger entry. May be closed (and revised) at any time (even before returning).
// Only for use by unit tests.
Remote.prototype.request_ledger_current = function (onDone, onFailure) {
this.request({ 'command' : 'ledger_current' }, onDone, onFailure);
Remote.prototype.request_ledger_current = function () {
return new Request(this, 'ledger_current');
};
// <-> request:
// --> ledger : optional
// --> ledger_index : optional
// --> type
Remote.prototype.request_ledger_entry = function (req, onDone, onFailure) {
var self = this;
Remote.prototype.request_ledger_entry = function (type) {
assert(this.trusted); // If not trusted, need to check proof, maybe talk packet protocol.
req.command = 'ledger_entry';
if (req.ledger_closed)
{
// XXX Initial implementation no caching.
this.request(req, onDone, onFailure);
}
// else if (req.ledger_index)
else
{
// Current
// XXX Only allow with standalone mode. Must sync response with advance.
var entry;
switch (req.type) {
case 'account_root':
var cache = this.ledgers.current.account_root;
if (!cache)
{
cache = this.ledgers.current.account_root = {};
}
entry = this.ledgers.current.account_root[req.account];
break;
default:
// This type not cached.
var self = this;
var request = new Request(this, 'ledger_entry');
if (type)
this.type = type;
// Transparent caching:
request.on('request', function (remote) { // Intercept default request.
if (this.ledger_closed) {
// XXX Initial implementation no caching.
}
if (entry)
{
onDone(entry);
}
else
{
// Not cached.
// Submit request
this.request(req, function (r) {
// Got result.
switch (req.type) {
case 'account_root':
self.ledgers.current.account_root[r.node.Account] = r.node;
break;
// else if (req.ledger_index)
else if ('account_root' === this.type) {
var cache = self.ledgers.current.account_root;
if (!cache)
{
cache = self.ledgers.current.account_root = {};
}
var node = self.ledgers.current.account_root[request.message.account_root];
if (node) {
// Emulate fetch of ledger entry.
this.request.emit('success', {
// YYY Missing lots of fields.
'node' : node,
});
}
else {
// Was not cached.
// XXX Only allow with trusted mode. Must sync response with advance.
switch (response.type) {
case 'account_root':
request.on('success', function (message) {
// Cache node.
self.ledgers.current.account_root[message.node.Account] = message.node;
});
break;
default:
// This type not cached.
// nothing();
break;
}
onDone(r.node);
}, onFailure);
default:
// This type not cached.
}
this.request_default(remote);
}
}
}
});
return request;
};
// Submit a json transaction.
// done(value)
// XXX <-> value: { 'status', status, 'result' : result, ... }
Remote.prototype.submit = function (req, onDone, onFailure) {
if (this.trace) console.log("remote: submit: %s", JSON.stringify(req));
// Submit a transaction.
Remote.prototype.submit = function (transaction) {
var self = this;
req.command = 'submit';
if (req.secret && !this.trusted)
if (this.trace) console.log("remote: submit: %s", JSON.stringify(transaction.transaction));
if (transaction.secret && !this.trusted)
{
onFailure({ 'error' : 'untrustedSever', 'request' : req });
transaction.emit('error', {
'result' : 'serverUntrusted',
'result_message' : "Attempt to give a secret to an untrusted server."
});
}
else
{
this.request(req, onDone, onFailure);
else {
if (!transaction.transaction.Sequence) {
transaction.transaction.Sequence = this.account_seq(transaction.transaction.Account, 'ADVANCE');
}
if (!transaction.transaction.Sequence) {
var cache_request = this.account_cache(transaction.transaction.Account);
cache_request.on('success_account_cache', function () {
// Try again.
self.submit(transaction);
});
cache_request.on('error', function (message) {
// Forward errors.
transaction.emit('error', message);
});
cache_request.request();
}
else {
var submit_request = new Request(this, 'submit');
submit_request.transaction(transaction.transaction);
submit_request.secret(transaction.secret);
// Forward successes and errors.
submit_request.on('success', function (message) { transaction.emit('success', message); });
submit_request.on('error', function (message) { transaction.emit('error', message); });
// XXX If the transaction has 'final' event listeners, register the transaction to listen for final results.
// XXX Final messages only happen if a transaction makes it into a ledger.
// XXX A transaction may be "lost" or even resubmitted in this case.
// XXX When the ledger closes, the transaction's metadata can be looked up.
submit_request.request();
}
}
};
@@ -297,173 +471,253 @@ Remote.prototype.submit = function (req, onDone, onFailure) {
// Higher level functions.
//
// Subscribe to a server to get the current and closed ledger.
// XXX Set up routine to update on notification.
Remote.prototype.server_subscribe = function (onDone, onFailure) {
// Subscribe to a server to get 'ledger_closed' events.
// 'subscribed' : This command was successful.
// 'ledger_closed' : ledger_closed and ledger_current_index are updated.
Remote.prototype.server_subscribe = function () {
var self = this;
this.request(
{ 'command' : 'server_subscribe' },
function (r) {
self.ledger_current_index = r.ledger_current_index;
self.ledger_closed = r.ledger_closed;
self.stand_alone = r.stand_alone;
onDone();
},
onFailure
);
var request = new Request(this, 'server_subscribe');
request.on('success', function (message) {
self.ledger_current_index = message.ledger_current_index;
self.ledger_closed = message.ledger_closed;
self.stand_alone = message.stand_alone;
self.emit('subscribed');
self.emit('ledger_closed');
});
// XXX Could give error events, maybe even time out.
return this;
};
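// Usage sketch (illustrative only, not part of this commit; assumes a
// connected Remote in 'remote'). As documented above, the events are emitted
// on the Remote itself:
//
// remote.on('subscribed', function () {
//   console.log("current index: %s", remote.ledger_current_index);
// });
// remote.on('ledger_closed', function () {
//   console.log("ledger closed: %s", remote.ledger_closed);
// });
// remote.server_subscribe();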
Remote.prototype.ledger_accept = function (onDone, onFailure) {
// Ask the remote to accept the current ledger.
// - To be notified when the ledger is accepted, call server_subscribe() and then listen for 'ledger_closed' events.
Remote.prototype.ledger_accept = function () {
if (this.stand_alone)
{
this.request(
{ 'command' : 'ledger_accept' },
onDone,
onFailure
);
var request = new Request(this, 'ledger_accept');
request.request();
}
else {
onFailure({ 'error' : 'notStandAlone' });
self.emit('error', {
'error' : 'notStandAlone'
});
}
return this;
};
// Refresh accounts[account].seq
// done(result);
Remote.prototype.account_seq = function (account, advance, onDone, onFailure) {
var self = this;
var account_root_entry = this.accounts[account];
// Return the next account sequence if possible.
// <-- undefined or Sequence
Remote.prototype.account_seq = function (account, advance) {
var account_info = this.accounts[account];
var seq;
if (account_info && account_info.seq)
{
var seq = account_info.seq;
if (advance) account_info.seq += 1;
}
return seq;
}
// Return a request to refresh accounts[account].seq.
Remote.prototype.account_cache = function (account) {
var self = this;
var request = this.request_ledger_entry('account_root')
// Only care about a closed ledger.
// YYY Might be more advanced and work with a changing current ledger.
request.ledger(this.ledger_closed); // XXX Requires active server_subscribe
request.account_root(account);
request.on('success', function (message) {
var seq = message.node.Sequence;
if (account_root_entry && account_root_entry.seq)
{
var seq = account_root_entry.seq;
if (!self.accounts[account])
self.accounts[account] = {};
if (advance) account_root_entry.seq += 1;
self.accounts[account].seq = seq;
onDone(seq);
}
else
{
// Need to get the ledger entry.
this.request_ledger_entry(
{
'ledger' : this.ledger_closed,
'type' : 'account_root',
'account_root' : account
},
function (node) {
// Extract the sequence number from the account root entry.
var seq = node.Sequence;
// If the caller also waits for 'success', they might run before this.
request.emit('success_account_cache');
});
if (!account_root_entry) self.accounts[account] = {};
self.accounts[account].seq = seq + !!advance;
onDone(seq);
},
onFailure
);
}
};
// A submit that fills in the sequence number.
Remote.prototype.submit_seq = function (trans, onDirty, onDone, onFailure) {
var self = this;
// Get the next sequence number for the account.
this.account_seq(trans.transaction.Account, true,
function (seq) {
trans.transaction.Sequence = seq;
self.submit(trans, onDone, onFailure);
},
onFailure);
return request;
};
// Mark an account's root node as dirty.
Remote.prototype.dirty_account_root = function (account) {
delete this.ledgers.current.account_root.account;
delete this.ledgers.current.account_root[account];
};
Remote.prototype.transaction = function () {
return new Transaction(this);
};
//
// Transactions
//
Remote.prototype.offer_create = function (secret, src, taker_pays, taker_gets, expiration, onDone) {
var secret = this.config.accounts[src] ? this.config.accounts[src].secret : secret;
var src_account = this.config.accounts[src] ? this.config.accounts[src].account : src;
// A class to implement transactions.
// - Collects parameters
// - Allows event listeners to be attached to determine the outcome.
var Transaction = function (remote) {
this.prototype = events.EventEmitter; // XXX Node specific.
var transaction = {
'TransactionType' : 'OfferCreate',
'Account' : src_account,
'Fee' : fees.offer.to_json(),
'TakerPays' : taker_pays.to_json(),
'TakerGets' : taker_gets.to_json(),
};
this.remote = remote;
this.secret = undefined;
this.transaction = {}; // Transaction data.
};
Transaction.prototype = new events.EventEmitter;
// Return this. Node's EventEmitter on() doesn't return this.
Transaction.prototype.on = function (e, c) {
events.EventEmitter.prototype.on.call(this, e, c);
return this;
};
// Submit a transaction to the network.
Transaction.prototype.submit = function () {
var transaction = this.transaction;
if (undefined === transaction.Fee) {
if ('Payment' === transaction.TransactionType
&& transaction.Flags & exports.flags.Payment.CreateAccount) {
transaction.Fee = fees.account_create.to_json();
}
else {
transaction.Fee = fees['default'].to_json();
}
}
this.remote.submit(this);
return this;
}
//
// Set options for Transactions
//
// If the secret is in the config object, it does not need to be provided.
Transaction.prototype.secret = function (secret) {
this.secret = secret;
}
Transaction.prototype.send_max = function (send_max) {
if (send_max)
this.transaction.SendMax = send_max.to_json();
return this;
}
// Add flags to a transaction.
// --> flags: undefined, _flag_, or [ _flags_ ]
Transaction.prototype.flags = function (flags) {
if (flags) {
var transaction_flags = exports.flags[this.transaction.TransactionType];
if (undefined == this.transaction.Flags)
this.transaction.Flags = 0;
var flag_set = 'object' === typeof flags ? flags : [ flags ];
for (index in flag_set) {
var flag = flag_set[index];
if (flag in transaction_flags)
{
this.transaction.Flags += transaction_flags[flag];
}
else {
// XXX Immediately report an error or mark it.
}
}
if (this.transaction.Flags & exports.flags.Payment.CreateAccount)
this.transaction.Fee = fees.account_create.to_json();
}
return this;
}
//
// Transactions
//
// remote.transaction() // Build a transaction object.
// .offer_create(...) // Set major parameters.
// .flags() // Set optional parameters.
// .on() // Register for events.
// .submit(); // Send to network.
//
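// Concrete sketch of the pattern above (illustrative only, not part of this
// commit; assumes 'alice' and 'bob' are entries in config.accounts and
// 'amount' is an Amount built elsewhere):
//
// remote.transaction()
//   .payment('alice', 'bob', amount)     // Set major parameters.
//   .on('success', function (message) { console.log("paid"); })
//   .on('error', function (message) { console.log("failed: %s", JSON.stringify(message)); })
//   .submit();                           // Fee and Sequence are filled in on submit.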
// Allow config account defaults to be used.
Transaction.prototype.account_default = function (account) {
return this.remote.config.accounts[account] ? this.remote.config.accounts[account].account : account;
};
Transaction.prototype.account_secret = function (account) {
// Fill in secret from config, if needed.
return this.remote.config.accounts[account] ? this.remote.config.accounts[account].secret : undefined;
};
Transaction.prototype.offer_create = function (src, taker_pays, taker_gets, expiration) {
this.secret = this.account_secret(src);
this.transaction.TransactionType = 'OfferCreate';
this.transaction.Account = this.account_default(src);
this.transaction.Fee = fees.offer.to_json();
this.transaction.TakerPays = taker_pays.to_json();
this.transaction.TakerGets = taker_gets.to_json();
if (expiration)
transaction.Expiration = expiration;
this.transaction.Expiration = expiration;
this.submit_seq(
{
'transaction' : transaction,
'secret' : secret,
}, function () {
}, onDone);
return this;
};
Remote.prototype.ripple_line_set = function (secret, src, limit, quaility_in, quality_out, onDone) {
var secret = this.config.accounts[src] ? this.config.accounts[src].secret : secret;
var src_account = this.config.accounts[src] ? this.config.accounts[src].account : src;
// Construct a 'payment' transaction.
//
// When a transaction is submitted:
// - If the connection is reliable and the server is not merely forwarding and is not malicious,
Transaction.prototype.payment = function (src, dst, deliver_amount) {
this.secret = this.account_secret(src);
this.transaction.TransactionType = 'Payment';
this.transaction.Account = this.account_default(src);
this.transaction.Amount = deliver_amount.to_json();
this.transaction.Destination = this.account_default(dst);
var transaction = {
'TransactionType' : 'CreditSet',
'Account' : src_account,
'Fee' : fees['default'].to_json(),
};
return this;
}
if (limit)
transaction.LimitAmount = limit.to_json();
Remote.prototype.ripple_line_set = function (src, limit, quaility_in, quality_out) {
this.secret = this.account_secret(src);
this.transaction.TransactionType = 'CreditSet';
this.transaction.Account = this.account_default(src);
// Allow limit of 0 through.
if (undefined !== limit)
this.transaction.LimitAmount = limit.to_json();
if (quaility_in)
transaction.QualityIn = quaility_in;
this.transaction.QualityIn = quaility_in;
if (quaility_out)
transaction.QualityOut = quaility_out;
this.transaction.QualityOut = quaility_out;
this.submit_seq(
{
'transaction' : transaction,
'secret' : secret,
}, function () {
}, onDone);
};
// XXX Throw an error if nothing is set.
// --> create: is only valid if destination gets XNS.
Remote.prototype.send = function (secret, src, dst, deliver_amount, send_max, create, onDone) {
var secret = this.config.accounts[src] ? this.config.accounts[src].secret : secret;
var src_account = this.config.accounts[src] ? this.config.accounts[src].account : src;
var dst_account = this.config.accounts[dst] ? this.config.accounts[dst].account : dst;
var transaction = {
'TransactionType' : 'Payment',
'Account' : src_account,
'Fee' : (create ? fees.account_create : fees['default']).to_json(),
'Destination' : dst_account,
'Amount' : deliver_amount.to_json(),
};
if (create)
transaction.Flags = flags.tfCreateAccount;
if (send_max)
transaction.SendMax = send_max.to_json();
this.submit_seq(
{
'transaction' : transaction,
'secret' : secret,
}, function () {
}, onDone);
return this;
};
exports.Remote = Remote;

View File

@@ -1,11 +1,11 @@
#
# Sample newcoind.cfg
# Sample rippled.cfg
#
# This file should be named newcoind.cfg. This file is UTF-8 with Dos, UNIX,
# This file should be named rippled.cfg. This file is UTF-8 with DOS, UNIX,
# or Mac style end of lines. Blank lines and lines beginning with '#' are
# ignored. Undefined sections are reserved. No escapes are currently defined.
#
# When you launch newcoind, it will attempt to find this file. For details,
# When you launch rippled, it will attempt to find this file. For details,
# refer to the manual page for --conf command line option.
#
# [debug_logfile]
@@ -17,33 +17,33 @@
# Specifies where to find validators.txt for UNL bootstrapping and RPC command unl_network.
# During alpha testing, this defaults to: redstem.com
#
# Example: newcoin.org
# Example: ripple.com
#
# [unl_default]:
# XXX This should be called: [validators_file]
# Specifies how to bootstrap the UNL list. The UNL list is based on a
# validators.txt file and is maintained in the databases. When newcoind
# validators.txt file and is maintained in the databases. When rippled
# starts up, if the databases are missing or are obsolete due to an upgrade
# of newcoind, newcoind will reconstruct the UNL list as specified here.
# of rippled, rippled will reconstruct the UNL list as specified here.
#
# If this entry is not present or empty, newcoind will look for a validators.txt in the
# If this entry is not present or empty, rippled will look for a validators.txt in the
# config directory. If not found there, it will attempt to retrieve the file
# from the newcoin foundation's web site.
# from the Ripple foundation's web site.
#
# This entry is also used by the RPC command unl_load.
#
# Specify the file by specifying its full path.
#
# Examples:
# C:/home/johndoe/newcoin/validators.txt
# /home/johndoe/newcoin/validators.txt
# C:/home/johndoe/ripple/validators.txt
# /home/johndoe/ripple/validators.txt
#
# [validators]:
# Only valid in "newcoind.cfg", "newcoin.txt", and the referered [validators_url].
# Only valid in "rippled.cfg", "ripple.txt", and the referered [validators_url].
# List of nodes to accept as validators, specified by public key or domain.
#
# For domains, newcoind will probe for https web servers at the specied
# domain in the following order: newcoin.DOMAIN, www.DOMAIN, DOMAIN
# For domains, rippled will probe for https web servers at the specified
# domain in the following order: ripple.DOMAIN, www.DOMAIN, DOMAIN
#
# Examples:
# redstem.com
@@ -51,7 +51,7 @@
# n9MqiExBcoG19UXwoLjBJnhsxEhAZMuWwJDRdkyDz1EkEkwzQTNt John Doe
#
# [ips]:
# Only valid in "newcoind.cfg", "newcoin.txt", and the referered [ips_url].
# Only valid in "rippled.cfg", "ripple.txt", and the referered [ips_url].
# List of IPs where the Newcoin protocol is available.
# One IPv4 or IPv6 address per line.
# A port may optionally be specified after adding a space to the address.

View File

@@ -378,7 +378,7 @@ bool STAmount::setFullValue(const std::string& sAmount, const std::string& sCurr
void STAmount::canonicalize()
{
if (!mCurrency)
if (mCurrency.isZero())
{ // native currency amounts should always have an offset of zero
mIsNative = true;
@@ -401,7 +401,11 @@ void STAmount::canonicalize()
--mOffset;
}
assert(mValue <= cMaxNative);
if (mValue > cMaxNative)
{
assert(false);
throw std::runtime_error("Native currency amount out of range");
}
return;
}
@@ -430,9 +434,9 @@ void STAmount::canonicalize()
mValue /= 10;
++mOffset;
}
assert((mValue == 0) || ((mValue >= cMinValue) && (mValue <= cMaxValue)) );
assert((mValue == 0) || ((mOffset >= cMinOffset) && (mOffset <= cMaxOffset)) );
assert((mValue != 0) || (mOffset != -100) );
assert((mValue == 0) || ((mValue >= cMinValue) && (mValue <= cMaxValue)));
assert((mValue == 0) || ((mOffset >= cMinOffset) && (mOffset <= cMaxOffset)));
assert((mValue != 0) || (mOffset != -100));
}
void STAmount::add(Serializer& s) const
@@ -476,15 +480,24 @@ void STAmount::setValue(const STAmount &a)
mIsNegative = a.mIsNegative;
}
uint64 STAmount::toUInt64() const
{ // makes them sort easily
if (mIsNative)
return mValue;
if (mValue == 0)
return 0x4000000000000000ull;
if (mIsNegative)
return ((cMaxNative + 1) - mValue) | (static_cast<uint64>(mOffset + 97) << (64 - 10));
return mValue | (static_cast<uint64>(mOffset + 256 + 97) << (64 - 10));
int STAmount::compare(const STAmount& a) const
{ // Compares the value of a to the value of this STAmount, amounts must be comparable
if (mIsNegative != a.mIsNegative) return mIsNegative ? -1 : 1;
if (!mValue)
{
if (a.mIsNegative) return 1;
return a.mValue ? -1 : 0;
}
if (!a.mValue) return 1;
if (mOffset > a.mOffset) return mIsNegative ? -1 : 1;
if (mOffset < a.mOffset) return mIsNegative ? 1 : -1;
if (mValue > a.mValue) return mIsNegative ? -1 : 1;
if (mValue < a.mValue) return mIsNegative ? 1 : -1;
return 0;
}
STAmount* STAmount::construct(SerializerIterator& sit, SField::ref name)
@@ -633,25 +646,25 @@ bool STAmount::operator!=(const STAmount& a) const
bool STAmount::operator<(const STAmount& a) const
{
throwComparable(a);
return toUInt64() < a.toUInt64();
return compare(a) < 0;
}
bool STAmount::operator>(const STAmount& a) const
{
throwComparable(a);
return toUInt64() > a.toUInt64();
return compare(a) > 0;
}
bool STAmount::operator<=(const STAmount& a) const
{
throwComparable(a);
return toUInt64() <= a.toUInt64();
return compare(a) <= 0;
}
bool STAmount::operator>=(const STAmount& a) const
{
throwComparable(a);
return toUInt64() >= a.toUInt64();
return compare(a) >= 0;
}
STAmount& STAmount::operator+=(const STAmount& a)

View File

@@ -17,6 +17,8 @@
#include <boost/filesystem.hpp>
#include <boost/thread.hpp>
SETUP_LOG();
Application* theApp = NULL;
DatabaseCon::DatabaseCon(const std::string& strName, const char *initStrings[], int initCount)
@@ -56,7 +58,7 @@ void Application::stop()
mValidations.flush();
mAuxService.stop();
Log(lsINFO) << "Stopped: " << mIOService.stopped();
cLog(lsINFO) << "Stopped: " << mIOService.stopped();
}
static void InitDB(DatabaseCon** dbCon, const char *fileName, const char *dbInit[], int dbCount)
@@ -90,12 +92,12 @@ void Application::run()
if (theConfig.START_UP == Config::FRESH)
{
Log(lsINFO) << "Starting new Ledger";
cLog(lsINFO) << "Starting new Ledger";
startNewLedger();
}
else if (theConfig.START_UP == Config::LOAD)
{
Log(lsINFO) << "Loading Old Ledger";
cLog(lsINFO) << "Loading Old Ledger";
loadOldLedger();
}
else if (theConfig.START_UP == Config::NETWORK)
@@ -155,7 +157,7 @@ void Application::run()
if (theConfig.RUN_STANDALONE)
{
Log(lsWARNING) << "Running in standalone mode";
cLog(lsWARNING) << "Running in standalone mode";
mNetOps.setStandAlone();
}
else
@@ -185,8 +187,8 @@ void Application::startNewLedger()
NewcoinAddress rootAddress = NewcoinAddress::createAccountPublic(rootGeneratorMaster, 0);
// Print enough information to be able to claim root account.
Log(lsINFO) << "Root master seed: " << rootSeedMaster.humanSeed();
Log(lsINFO) << "Root account: " << rootAddress.humanAccountID();
cLog(lsINFO) << "Root master seed: " << rootSeedMaster.humanSeed();
cLog(lsINFO) << "Root account: " << rootAddress.humanAccountID();
{
Ledger::pointer firstLedger = boost::make_shared<Ledger>(rootAddress, SYSTEM_CURRENCY_START);
@@ -218,13 +220,34 @@ void Application::loadOldLedger()
}
lastLedger->setClosed();
cLog(lsINFO) << "Loading ledger " << lastLedger->getHash() << " seq:" << lastLedger->getLedgerSeq();
if (lastLedger->getAccountHash().isZero())
{
cLog(lsFATAL) << "Ledger is empty.";
assert(false);
exit(-1);
}
if (!lastLedger->walkLedger())
{
cLog(lsFATAL) << "Ledger is missing nodes.";
exit(-1);
}
if (!lastLedger->assertSane())
{
cLog(lsFATAL) << "Ledger is not sane.";
exit(-1);
}
Ledger::pointer openLedger = boost::make_shared<Ledger>(false, boost::ref(*lastLedger));
mMasterLedger.switchLedgers(lastLedger, openLedger);
mNetOps.setLastCloseTime(lastLedger->getCloseTimeNC());
}
catch (SHAMapMissingNode& mn)
{
Log(lsFATAL) << "Cannot load ledger. " << mn;
cLog(lsFATAL) << "Cannot load ledger. " << mn;
exit(-1);
}
}

View File

@@ -17,7 +17,7 @@ SField sfInvalid(-1), sfGeneric(0);
SField sfLedgerEntry(STI_LEDGERENTRY, 1, "LedgerEntry");
SField sfTransaction(STI_TRANSACTION, 1, "Transaction");
SField sfValidation(STI_VALIDATION, 1, "Validation");
SField sfID(STI_HASH256, 257, "id");
SField sfHash(STI_HASH256, 257, "hash");
SField sfIndex(STI_HASH256, 258, "index");
#define FIELD(name, type, index) SField sf##name(FIELD_CODE(STI_##type, index), STI_##type, index, #name);

View File

@@ -21,7 +21,11 @@ bool HashedObjectStore::store(HashedObjectType type, uint32 index,
const std::vector<unsigned char>& data, const uint256& hash)
{ // return: false = already in cache, true = added to cache
assert(hash == Serializer::getSHA512Half(data));
if (!theApp->getHashNodeDB()) return true;
if (!theApp->getHashNodeDB())
{
cLog(lsTRACE) << "HOS: no db";
return true;
}
if (mCache.touch(hash))
{
cLog(lsTRACE) << "HOS: " << hash << " store: incache";
@@ -31,59 +35,70 @@ bool HashedObjectStore::store(HashedObjectType type, uint32 index,
HashedObject::pointer object = boost::make_shared<HashedObject>(type, index, data, hash);
if (!mCache.canonicalize(hash, object))
{
// cLog(lsTRACE) << "Queuing write for " << hash;
boost::recursive_mutex::scoped_lock sl(mWriteMutex);
mWriteSet.push_back(object);
if (!mWritePending && (mWriteSet.size() >= 64))
if (!mWritePending)
{
mWritePending = true;
boost::thread t(boost::bind(&HashedObjectStore::bulkWrite, this));
t.detach();
}
}
// else
// cLog(lsTRACE) << "HOS: already had " << hash;
return true;
}
void HashedObjectStore::bulkWrite()
{
std::vector< boost::shared_ptr<HashedObject> > set;
set.reserve(128);
while (1)
{
boost::recursive_mutex::scoped_lock sl(mWriteMutex);
mWriteSet.swap(set);
mWritePending = false;
}
cLog(lsINFO) << "HOS: BulkWrite " << set.size();
set.clear();
set.reserve(128);
static boost::format fExists("SELECT ObjType FROM CommittedObjects WHERE Hash = '%s';");
static boost::format
fAdd("INSERT INTO CommittedObjects (Hash,ObjType,LedgerIndex,Object) VALUES ('%s','%c','%u',%s);");
Database* db = theApp->getHashNodeDB()->getDB();
ScopedLock sl = theApp->getHashNodeDB()->getDBLock();
db->executeSQL("BEGIN TRANSACTION;");
BOOST_FOREACH(const boost::shared_ptr<HashedObject>& it, set)
{
if (!SQL_EXISTS(db, boost::str(fExists % it->getHash().GetHex())))
{
char type;
switch(it->getType())
boost::recursive_mutex::scoped_lock sl(mWriteMutex);
mWriteSet.swap(set);
if (set.empty())
{
case hotLEDGER: type= 'L'; break;
case hotTRANSACTION: type = 'T'; break;
case hotACCOUNT_NODE: type = 'A'; break;
case hotTRANSACTION_NODE: type = 'N'; break;
default: type = 'U';
mWritePending = false;
return;
}
std::string rawData;
db->escape(&(it->getData().front()), it->getData().size(), rawData);
db->executeSQL(boost::str(fAdd % it->getHash().GetHex() % type % it->getIndex() % rawData ));
}
}
// cLog(lsINFO) << "HOS: writing " << set.size();
db->executeSQL("END TRANSACTION;");
static boost::format fExists("SELECT ObjType FROM CommittedObjects WHERE Hash = '%s';");
static boost::format
fAdd("INSERT INTO CommittedObjects (Hash,ObjType,LedgerIndex,Object) VALUES ('%s','%c','%u',%s);");
Database* db = theApp->getHashNodeDB()->getDB();
ScopedLock sl = theApp->getHashNodeDB()->getDBLock();
db->executeSQL("BEGIN TRANSACTION;");
BOOST_FOREACH(const boost::shared_ptr<HashedObject>& it, set)
{
if (!SQL_EXISTS(db, boost::str(fExists % it->getHash().GetHex())))
{
char type;
switch(it->getType())
{
case hotLEDGER: type = 'L'; break;
case hotTRANSACTION: type = 'T'; break;
case hotACCOUNT_NODE: type = 'A'; break;
case hotTRANSACTION_NODE: type = 'N'; break;
default: type = 'U';
}
std::string rawData;
db->escape(&(it->getData().front()), it->getData().size(), rawData);
db->executeSQL(boost::str(fAdd % it->getHash().GetHex() % type % it->getIndex() % rawData ));
}
}
db->executeSQL("END TRANSACTION;");
}
}
HashedObject::pointer HashedObjectStore::retrieve(const uint256& hash)
@@ -111,7 +126,7 @@ HashedObject::pointer HashedObjectStore::retrieve(const uint256& hash)
if (!db->executeSQL(sql) || !db->startIterRows())
{
cLog(lsTRACE) << "HOS: " << hash << " fetch: not in db";
// cLog(lsTRACE) << "HOS: " << hash << " fetch: not in db";
return HashedObject::pointer();
}

View File

@@ -31,11 +31,12 @@ Ledger::Ledger(const NewcoinAddress& masterID, uint64 startAmount) : mTotCoins(s
AccountState::pointer startAccount = boost::make_shared<AccountState>(masterID);
startAccount->peekSLE().setFieldAmount(sfBalance, startAmount);
startAccount->peekSLE().setFieldU32(sfSequence, 1);
cLog(lsTRACE) << "root account: " << startAccount->peekSLE().getJson(0);
mAccountStateMap->armDirty();
writeBack(lepCREATE, startAccount->getSLE());
#if 0
std::cerr << "Root account:";
startAccount->dump();
#endif
mAccountStateMap->flushDirty(256, hotACCOUNT_NODE, mLedgerSeq);
mAccountStateMap->disarmDirty();
}
Ledger::Ledger(const uint256 &parentHash, const uint256 &transHash, const uint256 &accountHash,
@@ -225,18 +226,24 @@ bool Ledger::addTransaction(const uint256& txID, const Serializer& txn)
{ // low-level - just add to table
SHAMapItem::pointer item = boost::make_shared<SHAMapItem>(txID, txn.peekData());
if (!mTransactionMap->addGiveItem(item, true, false))
{
cLog(lsWARNING) << "Attempt to add transaction to ledger that already had it";
return false;
}
return true;
}
bool Ledger::addTransaction(const uint256& txID, const Serializer& txn, const Serializer& md)
{ // low-level - just add to table
Serializer s(txn.getDataLength() + md.getDataLength() + 64);
Serializer s(txn.getDataLength() + md.getDataLength() + 16);
s.addVL(txn.peekData());
s.addVL(md.peekData());
SHAMapItem::pointer item = boost::make_shared<SHAMapItem>(txID, s.peekData());
if (!mTransactionMap->addGiveItem(item, true, true))
{
cLog(lsFATAL) << "Attempt to add transaction+MD to ledger that already had it";
return false;
}
return true;
}
@@ -273,6 +280,22 @@ Transaction::pointer Ledger::getTransaction(const uint256& transID) const
return txn;
}
SerializedTransaction::pointer Ledger::getSTransaction(SHAMapItem::ref item, SHAMapTreeNode::TNType type)
{
SerializerIterator sit(item->peekSerializer());
if (type == SHAMapTreeNode::tnTRANSACTION_NM)
return boost::make_shared<SerializedTransaction>(boost::ref(sit));
else if (type == SHAMapTreeNode::tnTRANSACTION_MD)
{
Serializer sTxn(sit.getVL());
SerializerIterator tSit(sTxn);
return boost::make_shared<SerializedTransaction>(boost::ref(tSit));
}
return SerializedTransaction::pointer();
}
bool Ledger::getTransaction(const uint256& txID, Transaction::pointer& txn, TransactionMetaSet::pointer& meta)
{
SHAMapTreeNode::TNType type;
@@ -285,7 +308,7 @@ bool Ledger::getTransaction(const uint256& txID, Transaction::pointer& txn, Tran
txn = theApp->getMasterTransaction().fetch(txID, false);
meta = TransactionMetaSet::pointer();
if (!txn)
txn = Transaction::sharedTransaction(item->getData(), true);
txn = Transaction::sharedTransaction(item->peekData(), true);
}
else if (type == SHAMapTreeNode::tnTRANSACTION_MD)
{ // in tree with metadata
@@ -313,12 +336,13 @@ bool Ledger::unitTest()
uint256 Ledger::getHash()
{
if(!mValidHash) updateHash();
return(mHash);
if (!mValidHash)
updateHash();
return mHash;
}
void Ledger::saveAcceptedLedger(Ledger::ref ledger)
{
void Ledger::saveAcceptedLedger()
{ // can be called in a different thread
static boost::format ledgerExists("SELECT LedgerSeq FROM Ledgers where LedgerSeq = %d;");
static boost::format deleteLedger("DELETE FROM Ledgers WHERE LedgerSeq = %d;");
static boost::format AcctTransExists("SELECT LedgerSeq FROM AccountTransactions WHERE TransId = '%s';");
@@ -328,78 +352,92 @@ void Ledger::saveAcceptedLedger(Ledger::ref ledger)
"(LedgerHash,LedgerSeq,PrevHash,TotalCoins,ClosingTime,PrevClosingTime,CloseTimeRes,CloseFlags,"
"AccountSetHash,TransSetHash) VALUES ('%s','%u','%s','%s','%u','%u','%d','%u','%s','%s');");
ScopedLock sl(theApp->getLedgerDB()->getDBLock());
if (SQL_EXISTS(theApp->getLedgerDB()->getDB(), boost::str(ledgerExists % ledger->mLedgerSeq)))
theApp->getLedgerDB()->getDB()->executeSQL(boost::str(deleteLedger % ledger->mLedgerSeq));
theApp->getLedgerDB()->getDB()->executeSQL(boost::str(addLedger %
ledger->getHash().GetHex() % ledger->mLedgerSeq % ledger->mParentHash.GetHex() %
boost::lexical_cast<std::string>(ledger->mTotCoins) % ledger->mCloseTime % ledger->mParentCloseTime %
ledger->mCloseResolution % ledger->mCloseFlags %
ledger->mAccountHash.GetHex() % ledger->mTransHash.GetHex()));
// write out dirty nodes
int fc;
while ((fc = ledger->mTransactionMap->flushDirty(256, hotTRANSACTION_NODE, ledger->mLedgerSeq)) > 0)
{ cLog(lsINFO) << "Flushed " << fc << " dirty transaction nodes"; }
while ((fc = ledger->mAccountStateMap->flushDirty(256, hotACCOUNT_NODE, ledger->mLedgerSeq)) > 0)
{ cLog(lsINFO) << "Flushed " << fc << " dirty state nodes"; }
ledger->disarmDirty();
SHAMap& txSet = *ledger->peekTransactionMap();
Database *db = theApp->getTxnDB()->getDB();
ScopedLock dbLock = theApp->getTxnDB()->getDBLock();
db->executeSQL("BEGIN TRANSACTION;");
for (SHAMapItem::pointer item = txSet.peekFirstItem(); !!item; item = txSet.peekNextItem(item->getTag()))
if (!getAccountHash().isNonZero())
{
SerializedTransaction::pointer txn = theApp->getMasterTransaction().fetch(item, false, ledger->mLedgerSeq);
// Make sure transaction is in AccountTransactions.
if (!SQL_EXISTS(db, boost::str(AcctTransExists % item->getTag().GetHex())))
{
// Transaction not in AccountTransactions
std::vector<NewcoinAddress> accts = txn->getAffectedAccounts();
std::string sql = "INSERT INTO AccountTransactions (TransID, Account, LedgerSeq) VALUES ";
bool first = true;
for (std::vector<NewcoinAddress>::iterator it = accts.begin(), end = accts.end(); it != end; ++it)
{
if (!first)
sql += ", ('";
else
{
sql += "('";
first = false;
}
sql += txn->getTransactionID().GetHex();
sql += "','";
sql += it->humanAccountID();
sql += "',";
sql += boost::lexical_cast<std::string>(ledger->getLedgerSeq());
sql += ")";
}
sql += ";";
Log(lsTRACE) << "ActTx: " << sql;
db->executeSQL(sql); // may already be in there
}
if (SQL_EXISTS(db, boost::str(transExists % txn->getTransactionID().GetHex())))
{
// In Transactions, update LedgerSeq and Status.
db->executeSQL(boost::str(updateTx
% ledger->getLedgerSeq()
% TXN_SQL_VALIDATED
% txn->getTransactionID().GetHex()));
}
else
{
// Not in Transactions, insert the whole thing..
db->executeSQL(
txn->getSQLInsertHeader() + txn->getSQL(ledger->getLedgerSeq(), TXN_SQL_VALIDATED) + ";");
}
cLog(lsFATAL) << "AH is zero: " << getJson(0);
assert(false);
}
db->executeSQL("COMMIT TRANSACTION;");
theApp->getOPs().pubLedger(ledger);
assert (getAccountHash() == mAccountStateMap->getHash());
assert (getTransHash() == mTransactionMap->getHash());
{
ScopedLock sl(theApp->getLedgerDB()->getDBLock());
if (SQL_EXISTS(theApp->getLedgerDB()->getDB(), boost::str(ledgerExists % mLedgerSeq)))
theApp->getLedgerDB()->getDB()->executeSQL(boost::str(deleteLedger % mLedgerSeq));
theApp->getLedgerDB()->getDB()->executeSQL(boost::str(addLedger %
getHash().GetHex() % mLedgerSeq % mParentHash.GetHex() %
boost::lexical_cast<std::string>(mTotCoins) % mCloseTime % mParentCloseTime %
mCloseResolution % mCloseFlags %
mAccountHash.GetHex() % mTransHash.GetHex()));
// write out dirty nodes
int fc;
while ((fc = mTransactionMap->flushDirty(256, hotTRANSACTION_NODE, mLedgerSeq)) > 0)
{ cLog(lsINFO) << "Flushed " << fc << " dirty transaction nodes"; }
while ((fc = mAccountStateMap->flushDirty(256, hotACCOUNT_NODE, mLedgerSeq)) > 0)
{ cLog(lsINFO) << "Flushed " << fc << " dirty state nodes"; }
disarmDirty();
SHAMap& txSet = *peekTransactionMap();
Database *db = theApp->getTxnDB()->getDB();
ScopedLock dbLock = theApp->getTxnDB()->getDBLock();
db->executeSQL("BEGIN TRANSACTION;");
SHAMapTreeNode::TNType type;
for (SHAMapItem::pointer item = txSet.peekFirstItem(type); !!item;
item = txSet.peekNextItem(item->getTag(), type))
{
SerializedTransaction::pointer txn = getSTransaction(item, type);
assert(txn);
// Make sure transaction is in AccountTransactions.
if (!SQL_EXISTS(db, boost::str(AcctTransExists % item->getTag().GetHex())))
{
// Transaction not in AccountTransactions
std::vector<NewcoinAddress> accts = txn->getAffectedAccounts();
std::string sql = "INSERT INTO AccountTransactions (TransID, Account, LedgerSeq) VALUES ";
bool first = true;
for (std::vector<NewcoinAddress>::iterator it = accts.begin(), end = accts.end(); it != end; ++it)
{
if (!first)
sql += ", ('";
else
{
sql += "('";
first = false;
}
sql += txn->getTransactionID().GetHex();
sql += "','";
sql += it->humanAccountID();
sql += "',";
sql += boost::lexical_cast<std::string>(getLedgerSeq());
sql += ")";
}
sql += ";";
Log(lsTRACE) << "ActTx: " << sql;
db->executeSQL(sql); // may already be in there
}
if (SQL_EXISTS(db, boost::str(transExists % txn->getTransactionID().GetHex())))
{
// In Transactions, update LedgerSeq and Status.
db->executeSQL(boost::str(updateTx
% getLedgerSeq()
% TXN_SQL_VALIDATED
% txn->getTransactionID().GetHex()));
}
else
{
// Not in Transactions, insert the whole thing..
db->executeSQL(
txn->getSQLInsertHeader() + txn->getSQL(getLedgerSeq(), TXN_SQL_VALIDATED) + ";");
}
}
db->executeSQL("COMMIT TRANSACTION;");
}
theApp->getOPs().pubLedger(shared_from_this());
}
Ledger::pointer Ledger::getSQL(const std::string& sql)
@@ -449,6 +487,7 @@ Ledger::pointer Ledger::getSQL(const std::string& sql)
assert(false);
return Ledger::pointer();
}
Log(lsDEBUG) << "Loaded ledger: " << ledgerHash;
return ret;
}
@@ -469,6 +508,11 @@ Ledger::pointer Ledger::loadByHash(const uint256& ledgerHash)
}
void Ledger::addJson(Json::Value& ret, int options)
{
ret["ledger"] = getJson(options);
}
Json::Value Ledger::getJson(int options)
{
Json::Value ledger(Json::objectValue);
@@ -554,8 +598,8 @@ void Ledger::addJson(Json::Value& ret, int options)
}
ledger["accountState"] = state;
}
ledger["seqNum"]=boost::lexical_cast<std::string>(mLedgerSeq);
ret["ledger"] = ledger;
ledger["seqNum"] = boost::lexical_cast<std::string>(mLedgerSeq);
return ledger;
}
void Ledger::setAcquiring(void)
@@ -613,7 +657,7 @@ LedgerStateParms Ledger::writeBack(LedgerStateParms parms, SLE::ref entry)
if (create)
{
assert(!mAccountStateMap->hasItem(entry->getIndex()));
if(!mAccountStateMap->addGiveItem(item, false, false)) // FIXME: TX metadata
if(!mAccountStateMap->addGiveItem(item, false, false))
{
assert(false);
return lepERROR;
@@ -621,7 +665,7 @@ LedgerStateParms Ledger::writeBack(LedgerStateParms parms, SLE::ref entry)
return lepCREATED;
}
if (!mAccountStateMap->updateGiveItem(item, false, false)) // FIXME: TX metadata
if (!mAccountStateMap->updateGiveItem(item, false, false))
{
assert(false);
return lepERROR;
@@ -930,6 +974,32 @@ uint256 Ledger::getRippleStateIndex(const NewcoinAddress& naA, const NewcoinAddr
return s.getSHA512Half();
}
bool Ledger::walkLedger()
{
std::vector<SHAMapMissingNode> missingNodes;
mAccountStateMap->walkMap(missingNodes, 32);
if (sLog(lsINFO) && !missingNodes.empty())
{
Log(lsINFO) << missingNodes.size() << " missing account node(s)";
Log(lsINFO) << "First: " << missingNodes[0];
}
mTransactionMap->walkMap(missingNodes, 32);
return missingNodes.empty();
}
bool Ledger::assertSane()
{
if (mHash.isNonZero() && mAccountHash.isNonZero() && mAccountStateMap && mTransactionMap &&
(mAccountHash == mAccountStateMap->getHash()) && (mTransHash == mTransactionMap->getHash()))
return true;
Log(lsFATAL) << "ledger is not sane";
Json::Value j = getJson(0);
j["accountTreeHash"] = mAccountHash.GetHex();
j["transTreeHash"] = mTransHash.GetHex();
assert(false);
return false;
}
// vim:ts=4

View File

@@ -57,7 +57,7 @@ public:
TR_PASTASEQ = 6, // account is past this transaction
TR_PREASEQ = 7, // account is missing transactions before this
TR_BADLSEQ = 8, // ledger too early
TR_TOOSMALL = 9, // amount is less than Tx fee
TR_TOOSMALL = 9, // amount is less than Tx fee
};
// ledger close flags
@@ -82,10 +82,6 @@ private:
Ledger& operator=(const Ledger&); // no implementation
protected:
SLE::pointer getASNode(LedgerStateParms& parms, const uint256& nodeID, LedgerEntryType let);
public:
@@ -153,6 +149,7 @@ public:
bool hasTransaction(const uint256& TransID) const { return mTransactionMap->hasItem(TransID); }
Transaction::pointer getTransaction(const uint256& transID) const;
bool getTransaction(const uint256& transID, Transaction::pointer& txn, TransactionMetaSet::pointer& txMeta);
static SerializedTransaction::pointer getSTransaction(SHAMapItem::ref, SHAMapTreeNode::TNType);
// high-level functions
AccountState::pointer getAccountState(const NewcoinAddress& acctID);
@@ -161,7 +158,7 @@ public:
SLE::pointer getAccountRoot(const NewcoinAddress& naAccountID);
// database functions
static void saveAcceptedLedger(Ledger::ref);
void saveAcceptedLedger();
static Ledger::pointer loadByIndex(uint32 ledgerIndex);
static Ledger::pointer loadByHash(const uint256& ledgerHash);
@@ -281,8 +278,12 @@ public:
SLE::pointer getRippleState(const uint160& uiA, const uint160& uiB, const uint160& uCurrency)
{ return getRippleState(getRippleStateIndex(NewcoinAddress::createAccountID(uiA), NewcoinAddress::createAccountID(uiB), uCurrency)); }
Json::Value getJson(int options);
void addJson(Json::Value&, int options);
bool walkLedger();
bool assertSane();
static bool unitTest();
};

View File

@@ -18,7 +18,7 @@
#define TRUST_NETWORK
// #define LC_DEBUG
#define LC_DEBUG
typedef std::pair<const uint160, LedgerProposal::pointer> u160_prop_pair;
typedef std::pair<const uint256, LCTransaction::pointer> u256_lct_pair;
@@ -58,7 +58,6 @@ void TransactionAcquire::trigger(Peer::ref peer, bool timer)
}
if (!mHaveRoot)
{
cLog(lsINFO) << "have no root";
ripple::TMGetLedger tmGL;
tmGL.set_ledgerhash(mHash.begin(), mHash.size());
tmGL.set_itype(ripple::liTS_CANDIDATE);
@@ -301,37 +300,9 @@ void LedgerConsensus::checkLCL()
void LedgerConsensus::handleLCL(const uint256& lclHash)
{
mPrevLedgerHash = lclHash;
if (mPreviousLedger->getHash() == mPrevLedgerHash)
return;
Ledger::pointer newLCL = theApp->getMasterLedger().getLedgerByHash(lclHash);
if (newLCL)
mPreviousLedger = newLCL;
else if (mAcquiringLedger && (mAcquiringLedger->getHash() == mPrevLedgerHash))
return;
else
{
cLog(lsWARNING) << "Need consensus ledger " << mPrevLedgerHash;
mAcquiringLedger = theApp->getMasterLedgerAcquire().findCreate(mPrevLedgerHash);
std::vector<Peer::pointer> peerList = theApp->getConnectionPool().getPeerVector();
bool found = false;
BOOST_FOREACH(Peer::ref peer, peerList)
{
if (peer->hasLedger(mPrevLedgerHash))
{
found = true;
mAcquiringLedger->peerHas(peer);
}
}
if (!found)
{
BOOST_FOREACH(Peer::ref peer, peerList)
mAcquiringLedger->peerHas(peer);
}
if (mPrevLedgerHash != lclHash)
{ // first time switching to this ledger
mPrevLedgerHash = lclHash;
if (mHaveCorrectLCL && mProposing && mOurPosition)
{
@@ -339,15 +310,47 @@ void LedgerConsensus::handleLCL(const uint256& lclHash)
mOurPosition->bowOut();
propose();
}
mHaveCorrectLCL = false;
mProposing = false;
mValidating = false;
mCloseTimes.clear();
mPeerPositions.clear();
mPeerData.clear();
mDisputes.clear();
mCloseTimes.clear();
mDeadNodes.clear();
playbackProposals();
return;
}
if (mPreviousLedger->getHash() != mPrevLedgerHash)
{ // we need to switch the ledger we're working from
Ledger::pointer newLCL = theApp->getMasterLedger().getLedgerByHash(lclHash);
if (newLCL)
mPreviousLedger = newLCL;
else if (!mAcquiringLedger || (mAcquiringLedger->getHash() != mPrevLedgerHash))
{ // need to start acquiring the correct consensus LCL
cLog(lsWARNING) << "Need consensus ledger " << mPrevLedgerHash;
mAcquiringLedger = theApp->getMasterLedgerAcquire().findCreate(mPrevLedgerHash);
std::vector<Peer::pointer> peerList = theApp->getConnectionPool().getPeerVector();
bool found = false;
BOOST_FOREACH(Peer::ref peer, peerList)
{
if (peer->hasLedger(mPrevLedgerHash))
{
found = true;
mAcquiringLedger->peerHas(peer);
}
}
if (!found)
{
BOOST_FOREACH(Peer::ref peer, peerList)
mAcquiringLedger->peerHas(peer);
}
mHaveCorrectLCL = false;
return;
}
}
cLog(lsINFO) << "Acquired the consensus ledger " << mPrevLedgerHash;
@@ -356,7 +359,6 @@ void LedgerConsensus::handleLCL(const uint256& lclHash)
mCloseResolution = ContinuousLedgerTiming::getNextLedgerTimeResolution(
mPreviousLedger->getCloseResolution(), mPreviousLedger->getCloseAgree(),
mPreviousLedger->getLedgerSeq() + 1);
playbackProposals();
}
void LedgerConsensus::takeInitialPosition(Ledger& initialLedger)
@@ -675,7 +677,7 @@ void LedgerConsensus::updateOurPositions()
for (std::map<uint32, int>::iterator it = closeTimes.begin(), end = closeTimes.end(); it != end; ++it)
{
cLog(lsINFO) << "CCTime: " << it->first << " has " << it->second << ", " << thresh << " required";
if (it->second > thresh)
if (it->second >= thresh)
{
cLog(lsINFO) << "Close time consensus reached: " << it->first;
mHaveCloseTimeConsensus = true;
@@ -1114,6 +1116,15 @@ void LedgerConsensus::accept(SHAMap::ref set)
newLCL->setAccepted(closeTime, mCloseResolution, closeTimeCorrect);
newLCL->updateHash();
uint256 newLCLHash = newLCL->getHash();
if (sLog(lsTRACE))
{
Log(lsTRACE) << "newLCL";
Json::Value p;
newLCL->addJson(p, LEDGER_JSON_DUMP_TXNS | LEDGER_JSON_DUMP_STATE);
Log(lsTRACE) << p;
}
statusChange(ripple::neACCEPTED_LEDGER, *newLCL);
if (mValidating)
{
@@ -1181,13 +1192,6 @@ void LedgerConsensus::accept(SHAMap::ref set)
theApp->getOPs().closeTimeOffset(offset);
}
if (sLog(lsTRACE))
{
Log(lsTRACE) << "newLCL";
Json::Value p;
newLCL->addJson(p, LEDGER_JSON_DUMP_TXNS | LEDGER_JSON_DUMP_STATE);
Log(lsTRACE) << p;
}
}
void LedgerConsensus::endConsensus()

View File

@@ -51,7 +51,7 @@ void LedgerEntrySet::swapWith(LedgerEntrySet& e)
// This is basically: copy-on-read.
SLE::pointer LedgerEntrySet::getEntry(const uint256& index, LedgerEntryAction& action)
{
boost::unordered_map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(index);
std::map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(index);
if (it == mEntries.end())
{
action = taaNONE;
@@ -98,7 +98,7 @@ SLE::pointer LedgerEntrySet::entryCache(LedgerEntryType letType, const uint256&
LedgerEntryAction LedgerEntrySet::hasEntry(const uint256& index) const
{
boost::unordered_map<uint256, LedgerEntrySetEntry>::const_iterator it = mEntries.find(index);
std::map<uint256, LedgerEntrySetEntry>::const_iterator it = mEntries.find(index);
if (it == mEntries.end())
return taaNONE;
return it->second.mAction;
@@ -106,7 +106,7 @@ LedgerEntryAction LedgerEntrySet::hasEntry(const uint256& index) const
void LedgerEntrySet::entryCache(SLE::ref sle)
{
boost::unordered_map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(sle->getIndex());
std::map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(sle->getIndex());
if (it == mEntries.end())
{
mEntries.insert(std::make_pair(sle->getIndex(), LedgerEntrySetEntry(sle, taaCACHED, mSeq)));
@@ -127,7 +127,7 @@ void LedgerEntrySet::entryCache(SLE::ref sle)
void LedgerEntrySet::entryCreate(SLE::ref sle)
{
boost::unordered_map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(sle->getIndex());
std::map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(sle->getIndex());
if (it == mEntries.end())
{
mEntries.insert(std::make_pair(sle->getIndex(), LedgerEntrySetEntry(sle, taaCREATE, mSeq)));
@@ -157,7 +157,7 @@ void LedgerEntrySet::entryCreate(SLE::ref sle)
void LedgerEntrySet::entryModify(SLE::ref sle)
{
boost::unordered_map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(sle->getIndex());
std::map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(sle->getIndex());
if (it == mEntries.end())
{
mEntries.insert(std::make_pair(sle->getIndex(), LedgerEntrySetEntry(sle, taaMODIFY, mSeq)));
@@ -192,7 +192,7 @@ void LedgerEntrySet::entryModify(SLE::ref sle)
void LedgerEntrySet::entryDelete(SLE::ref sle)
{
boost::unordered_map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(sle->getIndex());
std::map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(sle->getIndex());
if (it == mEntries.end())
{
mEntries.insert(std::make_pair(sle->getIndex(), LedgerEntrySetEntry(sle, taaDELETE, mSeq)));
@@ -233,7 +233,7 @@ Json::Value LedgerEntrySet::getJson(int) const
Json::Value ret(Json::objectValue);
Json::Value nodes(Json::arrayValue);
for (boost::unordered_map<uint256, LedgerEntrySetEntry>::const_iterator it = mEntries.begin(),
for (std::map<uint256, LedgerEntrySetEntry>::const_iterator it = mEntries.begin(),
end = mEntries.end(); it != end; ++it)
{
Json::Value entry(Json::objectValue);
@@ -269,7 +269,7 @@ Json::Value LedgerEntrySet::getJson(int) const
SLE::pointer LedgerEntrySet::getForMod(const uint256& node, Ledger::ref ledger,
boost::unordered_map<uint256, SLE::pointer>& newMods)
{
boost::unordered_map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(node);
std::map<uint256, LedgerEntrySetEntry>::iterator it = mEntries.find(node);
if (it != mEntries.end())
{
if (it->second.mAction == taaDELETE)
@@ -310,7 +310,8 @@ bool LedgerEntrySet::threadTx(const NewcoinAddress& threadTo, Ledger::ref ledger
return threadTx(sle, ledger, newMods);
}
bool LedgerEntrySet::threadTx(SLE::ref threadTo, Ledger::ref ledger, boost::unordered_map<uint256, SLE::pointer>& newMods)
bool LedgerEntrySet::threadTx(SLE::ref threadTo, Ledger::ref ledger,
boost::unordered_map<uint256, SLE::pointer>& newMods)
{ // node = the node that was modified/deleted/created
// threadTo = the node that needs to know
uint256 prevTxID;
@@ -323,7 +324,8 @@ bool LedgerEntrySet::threadTx(SLE::ref threadTo, Ledger::ref ledger, boost::unor
return false;
}
bool LedgerEntrySet::threadOwners(SLE::ref node, Ledger::ref ledger, boost::unordered_map<uint256, SLE::pointer>& newMods)
bool LedgerEntrySet::threadOwners(SLE::ref node, Ledger::ref ledger,
boost::unordered_map<uint256, SLE::pointer>& newMods)
{ // thread new or modified node to owner or owners
if (node->hasOneOwner()) // thread to owner's account
{
@@ -351,7 +353,7 @@ void LedgerEntrySet::calcRawMeta(Serializer& s)
// Entries modified only as a result of building the transaction metadata
boost::unordered_map<uint256, SLE::pointer> newMod;
for (boost::unordered_map<uint256, LedgerEntrySetEntry>::const_iterator it = mEntries.begin(),
for (std::map<uint256, LedgerEntrySetEntry>::const_iterator it = mEntries.begin(),
end = mEntries.end(); it != end; ++it)
{
int nType = TMNEndOfMetadata;
@@ -410,8 +412,10 @@ void LedgerEntrySet::calcRawMeta(Serializer& s)
if (origNode->getType() == ltRIPPLE_STATE)
{
metaNode.addAccount(TMSLowID, NewcoinAddress::createAccountID(origNode->getFieldAmount(sfLowLimit).getIssuer()));
metaNode.addAccount(TMSHighID, NewcoinAddress::createAccountID(origNode->getFieldAmount(sfHighLimit).getIssuer()));
metaNode.addAccount(TMSLowID,
NewcoinAddress::createAccountID(origNode->getFieldAmount(sfLowLimit).getIssuer()));
metaNode.addAccount(TMSHighID,
NewcoinAddress::createAccountID(origNode->getFieldAmount(sfHighLimit).getIssuer()));
}
}

View File

@@ -32,11 +32,11 @@ class LedgerEntrySet
{
protected:
Ledger::pointer mLedger;
boost::unordered_map<uint256, LedgerEntrySetEntry> mEntries;
std::map<uint256, LedgerEntrySetEntry> mEntries; // cannot be unordered!
TransactionMetaSet mSet;
int mSeq;
LedgerEntrySet(Ledger::ref ledger, const boost::unordered_map<uint256, LedgerEntrySetEntry> &e,
LedgerEntrySet(Ledger::ref ledger, const std::map<uint256, LedgerEntrySetEntry> &e,
const TransactionMetaSet& s, int m) : mLedger(ledger), mEntries(e), mSet(s), mSeq(m) { ; }
SLE::pointer getForMod(const uint256& node, Ledger::ref ledger,
@@ -123,11 +123,11 @@ public:
void calcRawMeta(Serializer&);
// iterator functions
bool isEmpty() const { return mEntries.empty(); }
boost::unordered_map<uint256, LedgerEntrySetEntry>::const_iterator begin() const { return mEntries.begin(); }
boost::unordered_map<uint256, LedgerEntrySetEntry>::const_iterator end() const { return mEntries.end(); }
boost::unordered_map<uint256, LedgerEntrySetEntry>::iterator begin() { return mEntries.begin(); }
boost::unordered_map<uint256, LedgerEntrySetEntry>::iterator end() { return mEntries.end(); }
bool isEmpty() const { return mEntries.empty(); }
std::map<uint256, LedgerEntrySetEntry>::const_iterator begin() const { return mEntries.begin(); }
std::map<uint256, LedgerEntrySetEntry>::const_iterator end() const { return mEntries.end(); }
std::map<uint256, LedgerEntrySetEntry>::iterator begin() { return mEntries.begin(); }
std::map<uint256, LedgerEntrySetEntry>::iterator end() { return mEntries.end(); }
static bool intersect(const LedgerEntrySet& lesLeft, const LedgerEntrySet& lesRight);
};
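Note: the switch of mEntries from boost::unordered_map to std::map (and the "cannot be unordered!" comment) appears to be about deterministic iteration: calcRawMeta walks mEntries to build transaction metadata, and every server needs to emit the same bytes for the same transaction, which a hash map's implementation-defined order cannot guarantee. A minimal JavaScript sketch, illustrative only and not part of this tree, of why a defined iteration order matters:

// Two nodes may populate the same entries in different orders; iterating a
// hash map visits them in an implementation-defined order, so naive
// serialization can differ per node. Sorting by key first yields the same
// byte stream everywhere, so the resulting metadata (and its hash) agree.
var nodeA = [ ["f3ab01", "modify"], ["01cd9e", "create"] ];   // insertion order on node A
var nodeB = [ ["01cd9e", "create"], ["f3ab01", "modify"] ];   // insertion order on node B

function serializeSorted(entries) {
  return entries
    .slice()
    .sort(function (a, b) { return a[0] < b[0] ? -1 : 1; })
    .map(function (e) { return e[0] + ":" + e[1]; })
    .join("|");
}

// serializeSorted(nodeA) === serializeSorted(nodeB)   // -> true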

View File

@@ -36,6 +36,7 @@ void LedgerHistory::addAcceptedLedger(Ledger::pointer ledger)
assert(ledger->isAccepted());
assert(ledger->isImmutable());
mLedgersByIndex.insert(std::make_pair(ledger->getLedgerSeq(), ledger));
boost::thread thread(boost::bind(&Ledger::saveAcceptedLedger, ledger));
thread.detach();
}

View File

@@ -117,6 +117,8 @@ Transaction::pointer NetworkOPs::processTransaction(Transaction::pointer trans,
TER r = mLedgerMaster->doTransaction(*trans->getSTransaction(), tapOPEN_LEDGER);
trans->setResult(r);
#ifdef DEBUG
if (r != tesSUCCESS)
{
@@ -136,7 +138,7 @@ Transaction::pointer NetworkOPs::processTransaction(Transaction::pointer trans,
mLedgerMaster->addHeldTransaction(trans);
return trans;
}
if ((r == tefPAST_SEQ))
if (r == tefPAST_SEQ)
{ // duplicate or conflict
cLog(lsINFO) << "Transaction is obsolete";
trans->setStatus(OBSOLETE);
@@ -894,12 +896,12 @@ Json::Value NetworkOPs::pubBootstrapAccountInfo(Ledger::ref lpAccepted, const Ne
{
Json::Value jvObj(Json::objectValue);
jvObj["type"] = "accountInfoBootstrap";
jvObj["account"] = naAccountID.humanAccountID();
jvObj["owner"] = getOwnerInfo(lpAccepted, naAccountID);
jvObj["seq"] = lpAccepted->getLedgerSeq();
jvObj["hash"] = lpAccepted->getHash().ToString();
jvObj["time"] = Json::Value::UInt(lpAccepted->getCloseTimeNC());
jvObj["type"] = "accountInfoBootstrap";
jvObj["account"] = naAccountID.humanAccountID();
jvObj["owner"] = getOwnerInfo(lpAccepted, naAccountID);
jvObj["ledger_closed_index"] = lpAccepted->getLedgerSeq();
jvObj["ledger_closed"] = lpAccepted->getHash().ToString();
jvObj["time"] = Json::Value::UInt(lpAccepted->getCloseTimeNC());
return jvObj;
}
@@ -934,7 +936,7 @@ void NetworkOPs::pubLedger(Ledger::ref lpAccepted)
{
Json::Value jvObj(Json::objectValue);
jvObj["type"] = "ledgerAccepted";
jvObj["type"] = "ledgerClosed";
jvObj["ledger_closed_index"] = lpAccepted->getLedgerSeq();
jvObj["ledger_closed"] = lpAccepted->getHash().ToString();
jvObj["time"] = Json::Value::UInt(lpAccepted->getCloseTimeNC());
@@ -959,11 +961,11 @@ void NetworkOPs::pubLedger(Ledger::ref lpAccepted)
Json::Value jvObj(Json::objectValue);
jvObj["type"] = "ledgerAcceptedAccounts";
jvObj["seq"] = lpAccepted->getLedgerSeq();
jvObj["hash"] = lpAccepted->getHash().ToString();
jvObj["time"] = Json::Value::UInt(lpAccepted->getCloseTimeNC());
jvObj["accounts"] = jvAccounts;
jvObj["type"] = "ledgerClosedAccounts";
jvObj["ledger_closed_index"] = lpAccepted->getLedgerSeq();
jvObj["ledger_closed"] = lpAccepted->getHash().ToString();
jvObj["time"] = Json::Value::UInt(lpAccepted->getCloseTimeNC());
jvObj["accounts"] = jvAccounts;
BOOST_FOREACH(InfoSub* ispListener, mSubLedgerAccounts)
{
@@ -990,12 +992,12 @@ void NetworkOPs::pubLedger(Ledger::ref lpAccepted)
if (bAll)
{
pubTransactionAll(lpAccepted, *stTxn, terResult, "accepted");
pubTransactionAll(lpAccepted, *stTxn, terResult, "closed");
}
if (bAccounts)
{
pubTransactionAccounts(lpAccepted, *stTxn, terResult, "accepted");
pubTransactionAccounts(lpAccepted, *stTxn, terResult, "closed");
}
}
}
@@ -1020,27 +1022,35 @@ void NetworkOPs::pubLedger(Ledger::ref lpAccepted)
// XXX Publish delta information for accounts.
}
Json::Value NetworkOPs::transJson(const SerializedTransaction& stTxn, TER terResult, const std::string& strStatus, int iSeq, const std::string& strType)
Json::Value NetworkOPs::transJson(const SerializedTransaction& stTxn, TER terResult, bool bAccepted, Ledger::ref lpCurrent, const std::string& strType)
{
Json::Value jvObj(Json::objectValue);
std::string strToken;
std::string strHuman;
std::string sToken;
std::string sHuman;
transResultInfo(terResult, strToken, strHuman);
transResultInfo(terResult, sToken, sHuman);
jvObj["type"] = strType;
jvObj["transaction"] = stTxn.getJson(0);
jvObj["transaction"]["inLedger"] = iSeq;
jvObj["transaction"]["status"] = strStatus;
jvObj["result"] = strToken;
jvObj["result_code"] = terResult;
if (bAccepted) {
jvObj["ledger_closed_index"] = lpCurrent->getLedgerSeq();
jvObj["ledger_closed"] = lpCurrent->getHash().ToString();
}
else
{
jvObj["ledger_current_index"] = lpCurrent->getLedgerSeq();
}
jvObj["status"] = bAccepted ? "closed" : "proposed";
jvObj["engine_result"] = sToken;
jvObj["engine_result_code"] = terResult;
jvObj["engine_result_message"] = sHuman;
return jvObj;
}
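With transJson reworked as above (and pubLedger renamed to "ledgerClosed" earlier in this file), published messages are keyed by "status" and identify the ledger under the new names. A sketch of what a WebSocket subscriber now receives; the field names are taken from this function and the pubLedger hunk above, while the handler itself is hypothetical:

// Hypothetical client-side dispatch over the new message shapes.
function handleMessage(msg) {
  if (msg.type === 'ledgerClosed') {
    // pubLedger: { type, ledger_closed_index, ledger_closed, time }
    console.log("ledger %d closed: %s", msg.ledger_closed_index, msg.ledger_closed);
  } else if (msg.type === 'transaction' || msg.type === 'account') {
    if (msg.status === 'closed') {
      // accepted transactions are identified by the closed ledger
      console.log("tx in ledger %d: %s (%s)",
        msg.ledger_closed_index, msg.engine_result, msg.engine_result_message);
    } else {
      // proposed transactions only carry the current ledger index
      console.log("tx proposed against ledger %d: %s",
        msg.ledger_current_index, msg.engine_result);
    }
  }
}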
void NetworkOPs::pubTransactionAll(Ledger::ref lpCurrent, const SerializedTransaction& stTxn, TER terResult, const char* pState)
void NetworkOPs::pubTransactionAll(Ledger::ref lpCurrent, const SerializedTransaction& stTxn, TER terResult, bool bAccepted)
{
Json::Value jvObj = transJson(stTxn, terResult, pState, lpCurrent->getLedgerSeq(), "transaction");
Json::Value jvObj = transJson(stTxn, terResult, bAccepted, lpCurrent, "transaction");
BOOST_FOREACH(InfoSub* ispListener, mSubTransaction)
{
@@ -1048,7 +1058,7 @@ void NetworkOPs::pubTransactionAll(Ledger::ref lpCurrent, const SerializedTransa
}
}
void NetworkOPs::pubTransactionAccounts(Ledger::ref lpCurrent, const SerializedTransaction& stTxn, TER terResult, const char* pState)
void NetworkOPs::pubTransactionAccounts(Ledger::ref lpCurrent, const SerializedTransaction& stTxn, TER terResult, bool bAccepted)
{
boost::unordered_set<InfoSub*> usisNotify;
@@ -1074,7 +1084,7 @@ void NetworkOPs::pubTransactionAccounts(Ledger::ref lpCurrent, const SerializedT
if (!usisNotify.empty())
{
Json::Value jvObj = transJson(stTxn, terResult, pState, lpCurrent->getLedgerSeq(), "account");
Json::Value jvObj = transJson(stTxn, terResult, bAccepted, lpCurrent, "account");
BOOST_FOREACH(InfoSub* ispListener, usisNotify)
{

View File

@@ -82,9 +82,9 @@ protected:
void setMode(OperatingMode);
Json::Value transJson(const SerializedTransaction& stTxn, TER terResult, const std::string& strStatus, int iSeq, const std::string& strType);
void pubTransactionAll(Ledger::ref lpCurrent, const SerializedTransaction& stTxn, TER terResult, const char* pState);
void pubTransactionAccounts(Ledger::ref lpCurrent, const SerializedTransaction& stTxn, TER terResult, const char* pState);
Json::Value transJson(const SerializedTransaction& stTxn, TER terResult, bool bAccepted, Ledger::ref lpCurrent, const std::string& strType);
void pubTransactionAll(Ledger::ref lpCurrent, const SerializedTransaction& stTxn, TER terResult, bool bAccepted);
void pubTransactionAccounts(Ledger::ref lpCurrent, const SerializedTransaction& stTxn, TER terResult, bool bAccepted);
bool haveConsensusObject();
Json::Value pubBootstrapAccountInfo(Ledger::ref lpAccepted, const NewcoinAddress& naAccountID);

View File

@@ -1035,7 +1035,10 @@ void Peer::recvGetLedger(ripple::TMGetLedger& packet)
if ((!ledger) || (packet.has_ledgerseq() && (packet.ledgerseq() != ledger->getLedgerSeq())))
{
punishPeer(PP_UNKNOWN_REQUEST);
cLog(lsWARNING) << "Can't find the ledger they want";
if (ledger)
cLog(lsWARNING) << "Ledger has wrong sequence";
else
cLog(lsWARNING) << "Can't find the ledger they want";
return;
}
@@ -1056,7 +1059,7 @@ void Peer::recvGetLedger(ripple::TMGetLedger& packet)
{ // new-style root request
cLog(lsINFO) << "Ledger root w/map roots request";
SHAMap::pointer map = ledger->peekAccountStateMap();
if (map)
if (map && map->getHash().isNonZero())
{ // return account state root node if possible
Serializer rootNode(768);
if (map->getRootNode(rootNode, snfWIRE))
@@ -1065,7 +1068,7 @@ void Peer::recvGetLedger(ripple::TMGetLedger& packet)
if (ledger->getTransHash().isNonZero())
{
map = ledger->peekTransactionMap();
if (map)
if (map && map->getHash().isNonZero())
{
rootNode.resize(0);
if (map->getRootNode(rootNode, snfWIRE))

View File

@@ -230,11 +230,13 @@ void SHAMap::returnNode(SHAMapTreeNode::pointer& node, bool modify)
assert(node->getSeq() <= mSeq);
if (node && modify && (node->getSeq() != mSeq))
{ // have a CoW
if (mDirtyNodes) (*mDirtyNodes)[*node] = node;
node = boost::make_shared<SHAMapTreeNode>(*node, mSeq);
if (mDirtyNodes)
(*mDirtyNodes)[*node] = node;
assert(node->isValid());
mTNByID[*node] = node;
if (node->isRoot()) root = node;
if (node->isRoot())
root = node;
}
}
@@ -549,7 +551,7 @@ bool SHAMap::delItem(const uint256& id)
return true;
}
bool SHAMap::addGiveItem(const SHAMapItem::pointer& item, bool isTransaction, bool hasMeta)
bool SHAMap::addGiveItem(SHAMapItem::ref item, bool isTransaction, bool hasMeta)
{ // add the specified item, does not update
#ifdef ST_DEBUG
std::cerr << "aGI " << item->getTag() << std::endl;
@@ -647,7 +649,7 @@ bool SHAMap::addItem(const SHAMapItem& i, bool isTransaction, bool hasMetaData)
return addGiveItem(boost::make_shared<SHAMapItem>(i), isTransaction, hasMetaData);
}
bool SHAMap::updateGiveItem(const SHAMapItem::pointer& item, bool isTransaction, bool hasMeta)
bool SHAMap::updateGiveItem(SHAMapItem::ref item, bool isTransaction, bool hasMeta)
{ // can't change the tag but can change the hash
uint256 tag = item->getTag();
@@ -686,12 +688,15 @@ void SHAMapItem::dump()
SHAMapTreeNode::pointer SHAMap::fetchNodeExternal(const SHAMapNode& id, const uint256& hash)
{
if (!theApp->running())
{
cLog(lsTRACE) << "Trying to fetch external node with application not running";
throw SHAMapMissingNode(mType, id, hash);
}
HashedObject::pointer obj(theApp->getHashedObjectStore().retrieve(hash));
if (!obj)
{
Log(lsTRACE) << "fetchNodeExternal: missing " << hash;
// Log(lsTRACE) << "fetchNodeExternal: missing " << hash;
throw SHAMapMissingNode(mType, id, hash);
}
assert(Serializer::getSHA512Half(obj->getData()) == hash);
@@ -699,9 +704,7 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternal(const SHAMapNode& id, const ui
try
{
SHAMapTreeNode::pointer ret = boost::make_shared<SHAMapTreeNode>(id, obj->getData(), mSeq, snfPREFIX);
#ifdef DEBUG
assert((ret->getNodeHash() == hash) && (id == *ret));
#endif
return ret;
}
catch (...)
@@ -713,9 +716,18 @@ SHAMapTreeNode::pointer SHAMap::fetchNodeExternal(const SHAMapNode& id, const ui
void SHAMap::fetchRoot(const uint256& hash)
{
if (sLog(lsTRACE))
{
if (mType == smtTRANSACTION)
Log(lsTRACE) << "Fetch root TXN node " << hash;
else if (mType == smtSTATE)
Log(lsTRACE) << "Fetch root STATE node " << hash;
else
Log(lsTRACE) << "Fetch root SHAMap node " << hash;
}
root = fetchNodeExternal(SHAMapNode(), hash);
root->makeInner();
mTNByID[*root] = root;
assert(root->getNodeHash() == hash);
}
void SHAMap::armDirty()
@@ -735,8 +747,8 @@ int SHAMap::flushDirty(int maxNodes, HashedObjectType t, uint32 seq)
boost::unordered_map<SHAMapNode, SHAMapTreeNode::pointer>::iterator it = dirtyNodes.begin();
while (it != dirtyNodes.end())
{
tLog(mType == smtTRANSACTION, lsDEBUG) << "TX node write " << it->first;
tLog(mType == smtSTATE, lsDEBUG) << "STATE node write " << it->first;
// tLog(mType == smtTRANSACTION, lsDEBUG) << "TX node write " << it->first;
// tLog(mType == smtSTATE, lsDEBUG) << "STATE node write " << it->first;
s.erase();
it->second->addRaw(s, snfPREFIX);
theApp->getHashedObjectStore().store(t, seq, s.peekData(), s.getSHA512Half());

View File

@@ -195,6 +195,7 @@ public:
bool isInnerNode() const { return !mItem; }
bool setChildHash(int m, const uint256& hash);
bool isEmptyBranch(int m) const { return !mHashes[m]; }
bool isEmpty() const;
int getBranchCount() const;
void makeInner();
const uint256& getChildHash(int m) const
@@ -400,6 +401,8 @@ public:
static std::vector<unsigned char> checkTrustedPath(const uint256& ledgerHash, const uint256& leafIndex,
const std::list<std::vector<unsigned char> >& path);
void walkMap(std::vector<SHAMapMissingNode>& missingNodes, int maxMissing);
bool deepCompare(SHAMap& other);
virtual void dump(bool withHashes = false);
};

View File

@@ -186,3 +186,38 @@ bool SHAMap::compare(SHAMap::ref otherMap, SHAMapDiff& differences, int maxCount
return true;
}
void SHAMap::walkMap(std::vector<SHAMapMissingNode>& missingNodes, int maxMissing)
{
std::stack<SHAMapTreeNode::pointer> nodeStack;
boost::recursive_mutex::scoped_lock sl(mLock);
if (!root->isInner()) // root is only node, and we have it
return;
nodeStack.push(root);
while (!nodeStack.empty())
{
SHAMapTreeNode::pointer node = nodeStack.top();
nodeStack.pop();
for (int i = 0; i < 16; ++i)
if (!node->isEmptyBranch(i))
{
try
{
SHAMapTreeNode::pointer d = getNode(node->getChildNodeID(i), node->getChildHash(i), false);
if (d->isInner())
nodeStack.push(d);
}
catch (SHAMapMissingNode& n)
{
missingNodes.push_back(n);
if (--maxMissing <= 0)
return;
}
}
}
}

View File

@@ -278,6 +278,8 @@ SHAMapTreeNode::SHAMapTreeNode(const SHAMapNode& id, const std::vector<unsigned
}
else if (prefix == sHP_LeafNode)
{
if (s.getLength() < 32)
throw std::runtime_error("short PLN node");
uint256 u;
s.get256(u, s.getLength() - 32);
s.chop(32);
@@ -291,7 +293,7 @@ SHAMapTreeNode::SHAMapTreeNode(const SHAMapNode& id, const std::vector<unsigned
}
else if (prefix == sHP_InnerNode)
{
if (rawNode.size() != (512 + 4))
if (s.getLength() != 512)
throw std::runtime_error("invalid PIN node");
for (int i = 0; i < 16; ++i)
s.get256(mHashes[i] , i * 32);
@@ -347,9 +349,11 @@ bool SHAMapTreeNode::updateHash()
{
nh = Serializer::getPrefixHash(sHP_TransactionNode, mItem->peekData());
}
else assert(false);
else
assert(false);
if (nh == mHash) return false;
if (nh == mHash)
return false;
mHash = nh;
return true;
}
@@ -357,10 +361,12 @@ bool SHAMapTreeNode::updateHash()
void SHAMapTreeNode::addRaw(Serializer& s, SHANodeFormat format)
{
assert((format == snfPREFIX) || (format == snfWIRE));
if (mType == tnERROR) throw std::runtime_error("invalid I node type");
if (mType == tnERROR)
throw std::runtime_error("invalid I node type");
if (mType == tnINNER)
{
assert(!isEmpty());
if (format == snfPREFIX)
{
s.add32(sHP_InnerNode);
@@ -449,6 +455,14 @@ SHAMapItem::pointer SHAMapTreeNode::getItem() const
return boost::make_shared<SHAMapItem>(*mItem);
}
bool SHAMapTreeNode::isEmpty() const
{
assert(isInner());
for (int i = 0; i < 16; ++i)
if (mHashes[i].isNonZero()) return false;
return true;
}
int SHAMapTreeNode::getBranchCount() const
{
assert(isInner());
@@ -513,9 +527,9 @@ bool SHAMapTreeNode::setChildHash(int m, const uint256 &hash)
std::ostream& operator<<(std::ostream& out, const SHAMapMissingNode& mn)
{
if (mn.getMapType() == smtTRANSACTION)
out << "Missing/TXN(" << mn.getNodeID() << ")";
out << "Missing/TXN(" << mn.getNodeID() << "/" << mn.getNodeHash() << ")";
else if (mn.getMapType() == smtSTATE)
out << "Missing/STA(" << mn.getNodeID() << ")";
out << "Missing/STA(" << mn.getNodeID() << "/" << mn.getNodeHash() << ")";
else
out << "Missing/" << mn.getNodeID();
return out;

View File

@@ -415,7 +415,7 @@ BOOST_AUTO_TEST_SUITE( SHAMapSync )
BOOST_AUTO_TEST_CASE( SHAMapSync_test )
{
cLog(lsTRACE) << "being sync test";
cLog(lsTRACE) << "begin sync test";
unsigned int seed;
RAND_pseudo_bytes(reinterpret_cast<unsigned char *>(&seed), sizeof(seed));
srand(seed);

View File

@@ -98,10 +98,12 @@ bool SerializedLedgerEntry::thread(const uint256& txID, uint32 ledgerSeq, uint25
uint256 oldPrevTxID = getFieldH256(sfLastTxnID);
Log(lsTRACE) << "Thread Tx:" << txID << " prev:" << oldPrevTxID;
if (oldPrevTxID == txID)
{ // this transaction is already threaded
assert(getFieldU32(sfLastTxnSeq) == ledgerSeq);
return false;
}
prevTxID = oldPrevTxID;
prevLedgerID = getFieldU32(sfLastTxnSeq);
assert(prevTxID != txID);
setFieldH256(sfLastTxnID, txID);
setFieldU32(sfLastTxnSeq, ledgerSeq);
return true;

View File

@@ -13,6 +13,8 @@
#include "TransactionFormats.h"
#include "SerializedTransaction.h"
SETUP_LOG();
std::auto_ptr<SerializedType> STObject::makeDefaultObject(SerializedTypeID id, SField::ref name)
{
assert((id == STI_NOTPRESENT) || (id == name.fieldType));
@@ -154,7 +156,7 @@ bool STObject::setType(const std::vector<SOElement::ptr> &type)
{
if (elem->flags != SOE_OPTIONAL)
{
Log(lsWARNING) << "setType !valid missing " << elem->e_field.fieldName;
cLog(lsWARNING) << "setType !valid missing " << elem->e_field.fieldName;
valid = false;
}
newData.push_back(makeNonPresentObject(elem->e_field));
@@ -168,7 +170,7 @@ bool STObject::setType(const std::vector<SOElement::ptr> &type)
{
if (!t.getFName().isDiscardable())
{
Log(lsWARNING) << "setType !valid leftover: " << t.getFName().getName();
cLog(lsWARNING) << "setType !valid leftover: " << t.getFName().getName();
valid = false;
}
}
@@ -214,7 +216,7 @@ bool STObject::set(SerializerIterator& sit, int depth)
SField::ref fn = SField::getField(type, field);
if (fn.isInvalid())
{
Log(lsWARNING) << "Unknown field: field_type=" << type << ", field_name=" << field;
cLog(lsWARNING) << "Unknown field: field_type=" << type << ", field_name=" << field;
throw std::runtime_error("Unknown field");
}
giveObject(makeDeserializedObject(fn.fieldType, fn, sit, depth + 1));
@@ -853,7 +855,7 @@ STArray* STArray::construct(SerializerIterator& sit, SField::ref field)
SField::ref fn = SField::getField(type, field);
if (fn.isInvalid())
{
Log(lsTRACE) << "Unknown field: " << type << "/" << field;
cLog(lsTRACE) << "Unknown field: " << type << "/" << field;
throw std::runtime_error("Unknown field");
}
@@ -1097,7 +1099,7 @@ std::auto_ptr<STObject> STObject::parseJson(const Json::Value& object, SField::r
NewcoinAddress a;
if (!a.setAccountID(strValue))
{
Log(lsINFO) << "Invalid acccount JSON: " << fieldName << ": " << strValue;
cLog(lsINFO) << "Invalid acccount JSON: " << fieldName << ": " << strValue;
throw std::runtime_error("Account invalid");
}
data.push_back(new STAccount(field, a.getAccountID()));
@@ -1163,8 +1165,8 @@ BOOST_AUTO_TEST_CASE( FieldManipulation_test )
if (object1.getSerializer() == object2.getSerializer())
{
Log(lsINFO) << "O1: " << object1.getJson(0);
Log(lsINFO) << "O2: " << object2.getJson(0);
cLog(lsINFO) << "O1: " << object1.getJson(0);
cLog(lsINFO) << "O2: " << object2.getJson(0);
BOOST_FAIL("STObject error 4");
}
object1.makeFieldAbsent(sfTestH256);

View File

@@ -162,7 +162,9 @@ void SerializedTransaction::setSourceAccount(const NewcoinAddress& naSource)
Json::Value SerializedTransaction::getJson(int options) const
{
Json::Value ret = STObject::getJson(0);
ret["id"] = getTransactionID().GetHex();
ret["hash"] = getTransactionID().GetHex();
return ret;
}

View File

@@ -224,7 +224,6 @@ protected:
: SerializedType(name), mCurrency(cur), mIssuer(iss), mValue(val), mOffset(off),
mIsNative(isNat), mIsNegative(isNeg) { ; }
uint64 toUInt64() const;
static uint64 muldiv(uint64, uint64, uint64);
public:
@@ -287,6 +286,7 @@ public:
void negate() { if (!isZero()) mIsNegative = !mIsNegative; }
void zero() { mOffset = mIsNative ? -100 : 0; mValue = 0; mIsNegative = false; }
int compare(const STAmount&) const;
const uint160& getIssuer() const { return mIssuer; }
void setIssuer(const uint160& uIssuer) { mIssuer = uIssuer; }

View File

@@ -9,6 +9,9 @@
#include <boost/test/unit_test.hpp>
#include "key.h"
#include "Log.h"
SETUP_LOG();
int Serializer::addZeros(size_t uBytes)
{
@@ -185,7 +188,10 @@ int Serializer::addFieldID(int type, int name)
bool Serializer::getFieldID(int& type, int& name, int offset) const
{
if (!get8(type, offset))
{
cLog(lsWARNING) << "gFID: unable to get type";
return false;
}
name = type & 15;
type >>= 4;
if (type == 0)
@@ -193,14 +199,20 @@ bool Serializer::getFieldID(int& type, int& name, int offset) const
if (!get8(type, ++offset))
return false;
if ((type == 0) || (type < 16))
{
cLog(lsWARNING) << "gFID: uncommon type out of range " << type;
return false;
}
}
if (name == 0)
{ // uncommon name
if (!get8(name, ++offset))
return false;
if ((name == 0) || (name < 16))
{
cLog(lsWARNING) << "gFID: uncommon name out of range " << name;
return false;
}
}
return true;
}
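For reference, the field ID layout this function parses: the first byte packs the type code in the high nibble and the field name in the low nibble; a zero nibble means the real value follows in its own byte and must be 16 or greater (smaller values would have fit in the nibble). A decoding sketch in JavaScript, illustrative only, mirroring the checks above:

// Decode a serialized field ID from an array of byte values.
function decodeFieldID(bytes) {
  var offset = 0;
  var b = bytes[offset];
  var type = b >> 4;
  var name = b & 15;

  if (type === 0) {                    // uncommon type: next byte, must be >= 16
    type = bytes[++offset];
    if (!(type >= 16)) throw new Error("gFID: uncommon type out of range " + type);
  }
  if (name === 0) {                    // uncommon name: next byte, must be >= 16
    name = bytes[++offset];
    if (!(name >= 16)) throw new Error("gFID: uncommon name out of range " + name);
  }
  return { type: type, name: name, bytesUsed: offset + 1 };
}

// decodeFieldID([0x21])       -> { type: 2, name: 1,  bytesUsed: 1 }
// decodeFieldID([0x20, 0x11]) -> { type: 2, name: 17, bytesUsed: 2 }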

View File

@@ -169,7 +169,7 @@ bool TaggedCache<c_Key, c_Data>::canonicalize(const key_type& key, boost::shared
{ // in map, but expired. Update in map, insert in cache
mit->second = data;
mCache.insert(std::make_pair(key, std::make_pair(time(NULL), data)));
return false;
return true;
}
// in map and cache, canonicalize

View File

@@ -14,7 +14,7 @@
#include "Log.h"
Transaction::Transaction(const SerializedTransaction::pointer& sit, bool bValidate)
: mInLedger(0), mStatus(INVALID), mTransaction(sit)
: mInLedger(0), mStatus(INVALID), mResult(temUNCERTAIN), mTransaction(sit)
{
try
{
@@ -60,7 +60,7 @@ Transaction::Transaction(
uint32 uSeq,
const STAmount& saFee,
uint32 uSourceTag) :
mStatus(NEW)
mStatus(NEW), mResult(temUNCERTAIN)
{
mAccountFrom = naSourceAccount;
mFromPubKey = naPublicKey;
@@ -710,18 +710,18 @@ Json::Value Transaction::getJson(int options) const
if (mInLedger) ret["inLedger"]=mInLedger;
switch(mStatus)
switch (mStatus)
{
case NEW: ret["status"] = "new"; break;
case INVALID: ret["status"] = "invalid"; break;
case INCLUDED: ret["status"] = "included"; break;
case CONFLICTED: ret["status"] = "conflicted"; break;
case COMMITTED: ret["status"] = "committed"; break;
case HELD: ret["status"] = "held"; break;
case REMOVED: ret["status"] = "removed"; break;
case OBSOLETE: ret["status"] = "obsolete"; break;
case INCOMPLETE: ret["status"] = "incomplete"; break;
default: ret["status"] = "unknown";
case NEW: ret["status"] = "new"; break;
case INVALID: ret["status"] = "invalid"; break;
case INCLUDED: ret["status"] = "included"; break;
case CONFLICTED: ret["status"] = "conflicted"; break;
case COMMITTED: ret["status"] = "committed"; break;
case HELD: ret["status"] = "held"; break;
case REMOVED: ret["status"] = "removed"; break;
case OBSOLETE: ret["status"] = "obsolete"; break;
case INCOMPLETE: ret["status"] = "incomplete"; break;
default: ret["status"] = "unknown"; break;
}
return ret;

View File

@@ -15,6 +15,7 @@
#include "Serializer.h"
#include "SHAMap.h"
#include "SerializedTransaction.h"
#include "TransactionErr.h"
enum TransStatus
{
@@ -43,6 +44,7 @@ private:
uint32 mInLedger;
TransStatus mStatus;
TER mResult;
SerializedTransaction::pointer mTransaction;
@@ -280,6 +282,9 @@ public:
uint32 getLedger() const { return mInLedger; }
TransStatus getStatus() const { return mStatus; }
TER getResult() { return mResult; }
void setResult(TER terResult) { mResult = terResult; }
void setStatus(TransStatus status, uint32 ledgerSeq);
void setStatus(TransStatus status) { mStatus=status; }
void setLedger(uint32 ledger) { mInLedger = ledger; }

View File

@@ -18,7 +18,7 @@ SETUP_LOG();
void TransactionEngine::txnWrite()
{
// Write back the account states
for (boost::unordered_map<uint256, LedgerEntrySetEntry>::iterator it = mNodes.begin(), end = mNodes.end();
for (std::map<uint256, LedgerEntrySetEntry>::iterator it = mNodes.begin(), end = mNodes.end();
it != end; ++it)
{
const SLE::pointer& sleEntry = it->second.mEntry;

View File

@@ -42,7 +42,7 @@ enum TER // aka TransactionEngineResult
temINVALID,
temREDUNDANT,
temRIPPLE_EMPTY,
temUNCERTAIN,
temUNCERTAIN, // An intermediate result used internally, should never be returned.
temUNKNOWN,
// -199 .. -100: F Failure (sequence number previously used)

View File

@@ -195,7 +195,11 @@ bool TransactionMetaNode::thread(const uint256& prevTx, uint32 prevLgr)
{
BOOST_FOREACH(TransactionMetaNodeEntry& it, mEntries)
if (it.getType() == TMSThread)
{
TMNEThread* a = dynamic_cast<TMNEThread *>(&it);
assert(a && (a->getPrevTxID() == prevTx) && (a->getPrevLgr() == prevLgr));
return false;
}
addNode(new TMNEThread(prevTx, prevLgr));
return true;
}
@@ -267,8 +271,7 @@ TransactionMetaSet::TransactionMetaSet(uint32 ledger, const std::vector<unsigned
void TransactionMetaSet::addRaw(Serializer& s)
{
s.add256(mTransactionID);
for (std::map<uint256, TransactionMetaNode>::iterator it = mNodes.begin(), end = mNodes.end();
it != end; ++it)
for (std::map<uint256, TransactionMetaNode>::iterator it = mNodes.begin(), end = mNodes.end(); it != end; ++it)
it->second.addRaw(s);
s.add8(TMNEndOfMetadata);
}

View File

@@ -81,6 +81,9 @@ public:
virtual void addRaw(Serializer&) const;
virtual Json::Value getJson(int) const;
const uint256& getPrevTxID() const { return mPrevTxID; }
uint32 getPrevLgr() const { return mPrevLgrSeq; }
protected:
virtual TransactionMetaNodeEntry* duplicate(void) const { return new TMNEThread(*this); }
virtual int compare(const TransactionMetaNodeEntry&) const;
@@ -175,7 +178,7 @@ public:
protected:
uint256 mTransactionID;
uint32 mLedger;
std::map<uint256, TransactionMetaNode> mNodes;
std::map<uint256, TransactionMetaNode> mNodes; // must be an ordered set
public:
TransactionMetaSet() : mLedger(0) { ; }

View File

@@ -959,7 +959,19 @@ void WSConnection::doSubmit(Json::Value& jvResult, const Json::Value& jvRequest)
try
{
jvResult["submitted"] = tpTrans->getJson(0);
jvResult["transaction"] = tpTrans->getJson(0);
if (temUNCERTAIN != tpTrans->getResult())
{
std::string sToken;
std::string sHuman;
transResultInfo(tpTrans->getResult(), sToken, sHuman);
jvResult["engine_result"] = sToken;
jvResult["engine_result_code"] = tpTrans->getResult();
jvResult["engine_result_message"] = sHuman;
}
}
catch (std::exception& e)
{

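The submit response now reflects the engine's verdict whenever one is available: "transaction" holds the transaction's JSON (which, following the SerializedTransaction::getJson change above, should report the ID under "hash"), and engine_result / engine_result_code / engine_result_message are present once the result is no longer temUNCERTAIN. A sketch of consuming such a response on the client; field names come from this hunk, the surrounding request plumbing is assumed:

// Hypothetical handler for a "submit" response.
function onSubmitResponse(result) {
  console.log("submitted: %s", JSON.stringify(result.transaction));

  if ('engine_result' in result) {
    // e.g. "tesSUCCESS", 0, and a human-readable message
    console.log("%s (%d): %s",
      result.engine_result, result.engine_result_code, result.engine_result_message);
  } else {
    // engine result was still temUNCERTAIN when the response was built
    console.log("no engine result yet");
  }
}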
218
test/remote-test.js Normal file
View File

@@ -0,0 +1,218 @@
var buster = require("buster");
var config = require("./config.js");
var server = require("./server.js");
var amount = require("../js/amount.js");
var remote = require("../js/remote.js");
var Amount = amount.Amount;
var fastTearDown = true;
// How long to wait for server to start.
var serverDelay = 1500;
buster.testRunner.timeout = 5000;
buster.testCase("Remote functions", {
'setUp' :
function (done) {
server.start("alpha",
function (e) {
buster.refute(e);
alpha = remote.remoteConfig(config, "alpha");
alpha.connect(function (stat) {
buster.assert(1 == stat); // OPEN
done();
}, serverDelay);
});
},
'tearDown' :
function (done) {
if (fastTearDown) {
// Fast tearDown
server.stop("alpha", function (e) {
buster.refute(e);
done();
});
}
else {
alpha.disconnect(function (stat) {
buster.assert(3 == stat); // CLOSED
server.stop("alpha", function (e) {
buster.refute(e);
done();
});
});
}
},
'request_ledger_current' :
function (done) {
alpha.request_ledger_current().on('success', function (m) {
console.log(m);
buster.assert.equals(m.ledger_current_index, 3);
done();
})
.on('error', function(m) {
console.log(m);
buster.assert(false);
}).request();
},
'request_ledger_closed' :
function (done) {
alpha.request_ledger_closed().on('success', function (m) {
console.log("result: %s", JSON.stringify(m));
buster.assert.equals(m.ledger_closed_index, 2);
done();
})
.on('error', function(m) {
console.log("error: %s", m);
buster.assert(false);
}).request();
},
'manual account_root success' :
function (done) {
alpha.request_ledger_closed().on('success', function (r) {
// console.log("result: %s", JSON.stringify(r));
alpha
.request_ledger_entry('account_root')
.ledger(r.ledger_closed)
.account_root("rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh")
.on('success', function (r) {
// console.log("account_root: %s", JSON.stringify(r));
buster.assert('node' in r);
done();
})
.on('error', function(m) {
console.log("error: %s", m);
buster.assert(false);
}).request();
})
.on('error', function(m) {
console.log("error: %s", m);
buster.assert(false);
}).request();
},
'account_root remote malformedAddress' :
function (done) {
alpha.request_ledger_closed().on('success', function (r) {
console.log("result: %s", JSON.stringify(r));
alpha
.request_ledger_entry('account_root')
.ledger(r.ledger_closed)
.account_root("zHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh")
.on('success', function (r) {
// console.log("account_root: %s", JSON.stringify(r));
buster.assert(false);
})
.on('error', function(m) {
console.log("error: %s", m);
buster.assert.equals(m.error, 'remoteError');
buster.assert.equals(m.remote.error, 'malformedAddress');
done();
}).request();
})
.on('error', function(m) {
console.log("error: %s", m);
buster.assert(false);
}).request();
},
'account_root entryNotFound' :
function (done) {
alpha.request_ledger_closed().on('success', function (r) {
console.log("result: %s", JSON.stringify(r));
alpha
.request_ledger_entry('account_root')
.ledger(r.ledger_closed)
.account_root(config.accounts.alice.account)
.on('success', function (r) {
// console.log("account_root: %s", JSON.stringify(r));
buster.assert(false);
})
.on('error', function(m) {
console.log("error: %s", m);
buster.assert.equals(m.error, 'remoteError');
buster.assert.equals(m.remote.error, 'entryNotFound');
done();
}).request();
})
.on('error', function(m) {
console.log("error: %s", m);
buster.assert(false);
}).request();
},
'ledger_entry index' :
function (done) {
alpha.request_ledger_closed().on('success', function (r) {
console.log("result: %s", JSON.stringify(r));
alpha
.request_ledger_entry('index')
.ledger(r.ledger_closed)
.account_root(config.accounts.alice.account)
.index("2B6AC232AA4C4BE41BF49D2459FA4A0347E1B543A4C92FCEE0821C0201E2E9A8")
.on('success', function (r) {
// console.log("account_root: %s", JSON.stringify(r));
buster.assert('node_binary' in r);
done();
})
.on('error', function(m) {
console.log("error: %s", m);
buster.assert(false);
}).request();
})
.on('error', function(m) {
console.log(m);
buster.assert(false);
}).request();
},
'create account' :
function (done) {
alpha.transaction()
.payment('root', 'alice', Amount.from_json("10000"))
.flags('CreateAccount')
.on('success', function (r) {
// console.log("account_root: %s", JSON.stringify(r));
// Need to verify account and balance.
done();
})
.on('error', function(m) {
console.log("error: %s", m);
buster.assert(false);
}).submit();
},
});
// vim:sw=2:sts=2:ts=8

23
test/server-test.js Normal file
View File

@@ -0,0 +1,23 @@
var buster = require("buster");
var server = require("./server.js");
// How long to wait for server to start.
var serverDelay = 1500;
buster.testRunner.timeout = 5000;
buster.testCase("Standalone server startup", {
"server start and stop" : function (done) {
server.start("alpha",
function (e) {
buster.refute(e);
server.stop("alpha", function (e) {
buster.refute(e);
done();
});
});
}
});
// vim:sw=2:sts=2:ts=8

View File

@@ -19,135 +19,134 @@ var child = require("child_process");
var servers = {};
// Create a server object
var Server = function(name) {
this.name = name;
var Server = function (name) {
this.name = name;
};
// Return a server's rippled.cfg as string.
Server.prototype.configContent = function() {
var cfg = config.servers[this.name];
var cfg = config.servers[this.name];
return Object.keys(cfg).map(function(o) {
return util.format("[%s]\n%s\n", o, cfg[o]);
}).join("");
return Object.keys(cfg).map(function(o) {
return util.format("[%s]\n%s\n", o, cfg[o]);
}).join("");
};
Server.prototype.serverPath = function() {
return "tmp/server/" + this.name;
return "tmp/server/" + this.name;
};
Server.prototype.configPath = function() {
return path.join(this.serverPath(), "rippled.cfg");
return path.join(this.serverPath(), "rippled.cfg");
};
// Write a server's rippled.cfg.
Server.prototype.writeConfig = function(done) {
fs.writeFile(this.configPath(), this.configContent(), 'utf8', done);
fs.writeFile(this.configPath(), this.configContent(), 'utf8', done);
};
// Spawn the server.
Server.prototype.serverSpawnSync = function() {
// Spawn in standalone mode for now.
this.child = child.spawn(
config.rippled,
[
"-a",
"-v",
"--conf=rippled.cfg"
],
{
cwd: this.serverPath(),
env: process.env,
stdio: 'inherit'
});
// Spawn in standalone mode for now.
this.child = child.spawn(
config.rippled,
[
"-a",
"-v",
"--conf=rippled.cfg"
],
{
cwd: this.serverPath(),
env: process.env,
stdio: 'inherit'
});
console.log("server: start %s: %s -a --conf=%s", this.child.pid, config.rippled, this.configPath());
// By default, just log exits.
this.child.on('exit', function(code, signal) {
// If could not exec: code=127, signal=null
// If regular exit: code=0, signal=null
console.log("server: spawn: server exited code=%s: signal=%s", code, signal);
});
console.log("server: start %s: %s -a --conf=%s", this.child.pid, config.rippled, this.configPath());
// By default, just log exits.
this.child.on('exit', function(code, signal) {
// If could not exec: code=127, signal=null
// If regular exit: code=0, signal=null
console.log("server: spawn: server exited code=%s: signal=%s", code, signal);
});
};
// Prepare server's working directory.
Server.prototype.makeBase = function(done) {
var path = this.serverPath();
var self = this;
Server.prototype.makeBase = function (done) {
var path = this.serverPath();
var self = this;
// Reset the server directory, build it if needed.
nodeutils.resetPath(path, '0777', function(e) {
if (e) {
throw e;
}
else {
self.writeConfig(done);
}
});
// Reset the server directory, build it if needed.
nodeutils.resetPath(path, '0777', function (e) {
if (e) {
throw e;
}
else {
self.writeConfig(done);
}
});
};
// Create a standalone server.
// Prepare the working directory and spawn the server.
Server.prototype.start = function(done) {
var self = this;
Server.prototype.start = function (done) {
var self = this;
this.makeBase(function(e) {
if (e) {
throw e;
}
else {
self.serverSpawnSync();
done();
}
});
this.makeBase(function (e) {
if (e) {
throw e;
}
else {
self.serverSpawnSync();
done();
}
});
};
// Stop a standalone server.
Server.prototype.stop = function(done) {
if (this.child) {
// Update the on exit to invoke done.
this.child.on('exit', function(code, signal) {
console.log("server: stop: server exited");
done();
});
this.child.kill();
}
else
{
console.log("server: stop: no such server");
done('noSuchServer');
}
Server.prototype.stop = function (done) {
if (this.child) {
// Update the on exit to invoke done.
this.child.on('exit', function (code, signal) {
console.log("server: stop: server exited");
done();
});
this.child.kill();
}
else
{
console.log("server: stop: no such server");
done('noSuchServer');
}
};
// Start the named server.
exports.start = function(name, done) {
if (servers[name])
{
console.log("server: start: server already started.");
}
else
{
var server = new Server(name);
exports.start = function (name, done) {
if (servers[name])
{
console.log("server: start: server already started.");
}
else
{
var server = new Server(name);
servers[name] = server;
servers[name] = server;
console.log("server: start: %s", JSON.stringify(server));
console.log("server: start: %s", JSON.stringify(server));
server.start(done);
}
server.start(done);
}
};
// Delete the named server.
exports.stop = function(name, done) {
console.log("server: stop: %s of %s", name, Object.keys(servers).toString());
exports.stop = function (name, done) {
console.log("server: stop: %s of %s", name, Object.keys(servers).toString());
var server = servers[name];
if (server) {
server.stop(done);
delete servers[name];
}
var server = servers[name];
if (server) {
server.stop(done);
delete servers[name];
}
};
exports.Server = Server;

View File

@@ -1,201 +0,0 @@
var buster = require("buster");
var config = require("./config.js");
var server = require("./server.js");
var amount = require("../js/amount.js");
var remote = require("../js/remote.js");
var Amount = amount.Amount;
// How long to wait for server to start.
var serverDelay = 1500;
buster.testRunner.timeout = 5000;
buster.testCase("Standalone server startup", {
"server start and stop" : function (done) {
server.start("alpha",
function (e) {
buster.refute(e);
server.stop("alpha", function (e) {
buster.refute(e);
done();
});
});
}
});
buster.testCase("WebSocket connection", {
'setUp' :
function (done) {
server.start("alpha",
function (e) {
buster.refute(e);
done();
}
);
},
'tearDown' :
function (done) {
server.stop("alpha", function (e) {
buster.refute(e);
done();
});
},
"websocket connect and disconnect" :
function (done) {
var alpha = remote.remoteConfig(config, "alpha");
alpha.connect(function (stat) {
buster.assert(1 == stat); // OPEN
alpha.disconnect(function (stat) {
buster.assert(3 == stat); // CLOSED
done();
});
}, serverDelay);
},
});
buster.testCase("Websocket commands", {
'setUp' :
function (done) {
server.start("alpha",
function (e) {
buster.refute(e);
alpha = remote.remoteConfig(config, "alpha");
alpha.connect(function (stat) {
buster.assert(1 == stat); // OPEN
done();
}, serverDelay);
});
},
'tearDown' :
function (done) {
alpha.disconnect(function (stat) {
buster.assert(3 == stat); // CLOSED
server.stop("alpha", function (e) {
buster.refute(e);
done();
});
});
},
'ledger_current' :
function (done) {
alpha.request_ledger_current(function (r) {
console.log(r);
buster.assert.equals(r.ledger_current_index, 3);
done();
});
},
'ledger_closed' :
function (done) {
alpha.request_ledger_closed(function (r) {
console.log("result: %s", JSON.stringify(r));
buster.assert.equals(r.ledger_closed_index, 2);
done();
});
},
'account_root success' :
function (done) {
alpha.request_ledger_closed(function (r) {
// console.log("result: %s", JSON.stringify(r));
buster.refute(r.error);
alpha.request_ledger_entry({
'ledger_closed' : r.ledger_closed,
'type' : 'account_root',
'account_root' : 'rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh'
} , function (r) {
// console.log("account_root: %s", JSON.stringify(r));
buster.assert('node' in r);
done();
});
});
},
'account_root malformedAddress' :
function (done) {
alpha.request_ledger_closed(function (r) {
// console.log("result: %s", JSON.stringify(r));
buster.refute(r.error);
alpha.request_ledger_entry({
'ledger_closed' : r.ledger_closed,
'type' : 'account_root',
'account_root' : 'foobar'
} , function (r) {
// console.log("account_root: %s", JSON.stringify(r));
buster.assert.equals(r.error, 'malformedAddress');
done();
});
});
},
'account_root entryNotFound' :
function (done) {
alpha.request_ledger_closed(function (r) {
console.log("result: %s", JSON.stringify(r));
buster.refute(r.error);
alpha.request_ledger_entry({
'ledger_closed' : r.ledger_closed,
'type' : 'account_root',
'account_root' : config.accounts.alice.account,
}, function (r) {
console.log("account_root: %s", JSON.stringify(r));
buster.assert.equals(r.error, 'entryNotFound');
done();
});
});
},
'ledger_entry index' :
function (done) {
alpha.request_ledger_closed(function (r) {
// console.log("result: %s", JSON.stringify(r));
buster.refute(r.error);
alpha.request_ledger_entry({
'ledger_closed' : r.ledger_closed,
'type' : 'account_root',
'index' : "2B6AC232AA4C4BE41BF49D2459FA4A0347E1B543A4C92FCEE0821C0201E2E9A8",
} , function (r) {
console.log("node: %s", JSON.stringify(r));
buster.assert('node_binary' in r);
done();
});
});
},
'create account' :
function (done) {
alpha.send(undefined, 'root', 'alice', Amount.from_json("10000"), undefined, 'CREATE', function (r) {
console.log(r);
buster.refute(r.error);
done();
});
},
});
// vim:sw=2:sts=2:ts=8

46
test/websocket-test.js Normal file
View File

@@ -0,0 +1,46 @@
var buster = require("buster");
var config = require("./config.js");
var server = require("./server.js");
var remote = require("../js/remote.js");
// How long to wait for server to start.
var serverDelay = 1500;
buster.testRunner.timeout = 5000;
buster.testCase("WebSocket connection", {
'setUp' :
function (done) {
server.start("alpha",
function (e) {
buster.refute(e);
done();
}
);
},
'tearDown' :
function (done) {
server.stop("alpha", function (e) {
buster.refute(e);
done();
});
},
"websocket connect and disconnect" :
function (done) {
var alpha = remote.remoteConfig(config, "alpha", 'TRACE');
alpha.connect(function (stat) {
buster.assert.equals(stat, 1); // OPEN
alpha.disconnect(function (stat) {
buster.assert.equals(stat, 3); // CLOSED
done();
});
}, serverDelay);
},
});
// vim:sw=2:sts=2:ts=8