New command line LedgerTool. (RIPD-243)
* Retrieves and processes summary or full ledgers.
* Searches using arbitrary criteria (any Python function).
* Formats results using arbitrary formats (any Python function).
* Caches ledgers as .gz files to avoid repeated server requests.
* Handles ledger numbers, ranges, and special names like "validated" or "closed".
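For a concrete sense of the two "any Python function" bullets, here is a hedged sketch of the kind of search predicate and formatter such a tool can be pointed at. The function names below are illustrative only; they do not appear in this commit, and the exact hook signatures live in the ripple.ledger package rather than in this diff.

    # Hypothetical search hooks illustrating the bullets above; these
    # names are NOT part of the committed code.
    def has_transactions(ledger):
        # criteria: keep only ledgers that carry at least one transaction
        return len(ledger.get('transactions', [])) > 0

    def one_line(ledger):
        # format: render a matching ledger as a single line
        return '%s: %s' % (ledger['ledger_index'], ledger['ledger_hash'])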
bin/LedgerTool.py (new executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/usr/bin/env python

from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.ledger import Server
from ripple.ledger.Args import ARGS
from ripple.util.CommandList import CommandList

from ripple.ledger.commands import Info, Print

_COMMANDS = CommandList(Info, Print)

if __name__ == '__main__':
    server = Server.Server()
    args = list(ARGS.command)
    _COMMANDS.run_safe(args.pop(0), server, *args)
bin/README.md (new file, 8 lines)
@@ -0,0 +1,8 @@
Unit Tests
==========

To run the Python unit tests, execute:

    python -m unittest discover

from this directory.
bin/decorator.py (new file, 251 lines)
@@ -0,0 +1,251 @@
##########################     LICENCE     ###############################

# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.

# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:

#   Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
#   Redistributions in bytecode form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in
#   the documentation and/or other materials provided with the
#   distribution.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.

"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""

__version__ = '3.4.0'

__all__ = ["decorator", "FunctionMaker", "contextmanager"]

import sys, re, inspect
if sys.version >= '3':
    from inspect import getfullargspec
    def get_init(cls):
        return cls.__init__
else:
    class getfullargspec(object):
        "A quick and dirty replacement for getfullargspec for Python 2.X"
        def __init__(self, f):
            self.args, self.varargs, self.varkw, self.defaults = \
                inspect.getargspec(f)
            self.kwonlyargs = []
            self.kwonlydefaults = None
        def __iter__(self):
            yield self.args
            yield self.varargs
            yield self.varkw
            yield self.defaults
    def get_init(cls):
        return cls.__init__.im_func

DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')

# basic functionality
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict and
    methods update and make.
    """
    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>': # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                if sys.version < '3': # easy way
                    self.shortsignature = self.signature = \
                        inspect.formatargspec(
                        formatvalue=lambda val: "", *argspec)[1:-1]
                else: # Python 3 way
                    allargs = list(self.args)
                    allshortargs = list(self.args)
                    if self.varargs:
                        allargs.append('*' + self.varargs)
                        allshortargs.append('*' + self.varargs)
                    elif self.kwonlyargs:
                        allargs.append('*') # single star syntax
                    for a in self.kwonlyargs:
                        allargs.append('%s=None' % a)
                        allshortargs.append('%s=%s' % (a, a))
                    if self.varkw:
                        allargs.append('**' + self.varkw)
                        allshortargs.append('**' + self.varkw)
                    self.signature = ', '.join(allargs)
                    self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)

    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.func_defaults = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        callermodule = sys._getframe(3).f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)

    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self) # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1) # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'): # add a newline just for safety
            src += '\n' # this is needed in old versions of Python
        try:
            code = compile(src, '<string>', 'single')
            # print >> sys.stderr, 'Compiling %s' % src
            exec code in evaldict
        except:
            print >> sys.stderr, 'Error in generated code:'
            print >> sys.stderr, src
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func

    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature and body.
        evaldict is the evaluation dictionary. If addsource is true an attribute
        __source__ is added to the result. The attributes attrs are added,
        if any.
        """
        if isinstance(obj, str): # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1] #strip a right parens
            func = None
        else: # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        ibody = '\n'.join('    ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)

def decorator(caller, func=None):
    """
    decorator(caller) converts a caller function into a decorator;
    decorator(caller, func) decorates a function using a caller.
    """
    if func is not None: # returns a decorated function
        evaldict = func.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['_func_'] = func
        return FunctionMaker.create(
            func, "return _call_(_func_, %(shortsignature)s)",
            evaldict, undecorated=func, __wrapped__=func)
    else: # returns a decorator
        if inspect.isclass(caller):
            name = caller.__name__.lower()
            callerfunc = get_init(caller)
            doc = 'decorator(%s) converts functions/generators into ' \
                'factories of %s objects' % (caller.__name__, caller.__name__)
            fun = getfullargspec(callerfunc).args[1] # second arg
        elif inspect.isfunction(caller):
            name = '_lambda_' if caller.__name__ == '<lambda>' \
                else caller.__name__
            callerfunc = caller
            doc = caller.__doc__
            fun = getfullargspec(callerfunc).args[0] # first arg
        else: # assume caller is an object with a __call__ method
            name = caller.__class__.__name__.lower()
            callerfunc = caller.__call__.im_func
            doc = caller.__call__.__doc__
            fun = getfullargspec(callerfunc).args[1] # second arg
        evaldict = callerfunc.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['decorator'] = decorator
        return FunctionMaker.create(
            '%s(%s)' % (name, fun),
            'return decorator(_call_, %s)' % fun,
            evaldict, undecorated=caller, __wrapped__=caller,
            doc=doc, module=caller.__module__)

######################### contextmanager ########################

def __call__(self, func):
    'Context manager decorator'
    return FunctionMaker.create(
        func, "with _self_: return _func_(%(shortsignature)s)",
        dict(_self_=self, _func_=func), __wrapped__=func)

try: # Python >= 3.2

    from contextlib import _GeneratorContextManager
    ContextManager = type(
        'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))

except ImportError: # Python >= 2.5

    from contextlib import GeneratorContextManager
    def __init__(self, f, *a, **k):
        return GeneratorContextManager.__init__(self, f(*a, **k))
    ContextManager = type(
        'ContextManager', (GeneratorContextManager,),
        dict(__call__=__call__, __init__=__init__))

contextmanager = decorator(ContextManager)
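For context on the module just added: decorator's documented use is writing signature-preserving decorators. A minimal sketch, following the trace example from the module's own documentation (http://pypi.python.org/pypi/decorator):

    from decorator import decorator

    @decorator
    def trace(f, *args, **kw):
        # the caller runs around every call to the decorated function
        print('calling %s with args %s, %s' % (f.__name__, args, kw))
        return f(*args, **kw)

    @trace
    def add(x, y):
        return x + y

    add(1, 2)  # prints: calling add with args (1, 2), {}

Unlike a hand-rolled wrapper, the function that FunctionMaker generates for add keeps add's original two-argument signature, which is the point of vendoring this module.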
bin/jsonpath_rw/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
from .jsonpath import *
from .parser import parse

__version__ = '1.3.0'
bin/jsonpath_rw/jsonpath.py (new file, 510 lines)
@@ -0,0 +1,510 @@
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
import logging
import six
from six.moves import xrange
from itertools import *

logger = logging.getLogger(__name__)

# Turn on/off the automatic creation of id attributes
# ... could be a kwarg pervasively but uses are rare and simple today
auto_id_field = None

class JSONPath(object):
    """
    The base class for JSONPath abstract syntax; those
    methods stubbed here are the interface to supported
    JSONPath semantics.
    """

    def find(self, data):
        """
        All `JSONPath` types support `find()`, which returns an iterable of `DatumInContext`s.
        They keep track of the path followed to the current location, so if the calling code
        has some opinion about that, it can be passed in here as a starting point.
        """
        raise NotImplementedError()

    def update(self, data, val):
        "Returns `data` with the specified path replaced by `val`"
        raise NotImplementedError()

    def child(self, child):
        """
        Equivalent to Child(self, next) but with some canonicalization
        """
        if isinstance(self, This) or isinstance(self, Root):
            return child
        elif isinstance(child, This):
            return self
        elif isinstance(child, Root):
            return child
        else:
            return Child(self, child)

    def make_datum(self, value):
        if isinstance(value, DatumInContext):
            return value
        else:
            return DatumInContext(value, path=Root(), context=None)

class DatumInContext(object):
    """
    Represents a datum along a path from a context.

    Essentially a zipper but with a structure represented by JsonPath,
    and where the context is more of a parent pointer than a proper
    representation of the context.

    For quick-and-dirty work, this proxies any non-special attributes
    to the underlying datum, but the actual datum can (and usually should)
    be retrieved via the `value` attribute.

    To place `datum` within another, use `datum.in_context(context=..., path=...)`
    which extends the path. If the datum already has a context, it places the entire
    context within that passed in, so an object can be built from the inside
    out.
    """
    @classmethod
    def wrap(cls, data):
        if isinstance(data, cls):
            return data
        else:
            return cls(data)

    def __init__(self, value, path=None, context=None):
        self.value = value
        self.path = path or This()
        self.context = None if context is None else DatumInContext.wrap(context)

    def in_context(self, context, path):
        context = DatumInContext.wrap(context)

        if self.context:
            return DatumInContext(value=self.value, path=self.path, context=context.in_context(path=path, context=context))
        else:
            return DatumInContext(value=self.value, path=path, context=context)

    @property
    def full_path(self):
        return self.path if self.context is None else self.context.full_path.child(self.path)

    @property
    def id_pseudopath(self):
        """
        Looks like a path, but with ids stuck in when available
        """
        try:
            pseudopath = Fields(str(self.value[auto_id_field]))
        except (TypeError, AttributeError, KeyError): # This may not be all the interesting exceptions
            pseudopath = self.path

        if self.context:
            return self.context.id_pseudopath.child(pseudopath)
        else:
            return pseudopath

    def __repr__(self):
        return '%s(value=%r, path=%r, context=%r)' % (self.__class__.__name__, self.value, self.path, self.context)

    def __eq__(self, other):
        return isinstance(other, DatumInContext) and other.value == self.value and other.path == self.path and self.context == other.context

class AutoIdForDatum(DatumInContext):
    """
    This behaves like a DatumInContext, but the value is
    always the path leading up to it, not including the "id",
    and with any "id" fields along the way replacing the prior
    segment of the path

    For example, it will make "foo.bar.id" return a datum
    that behaves like DatumInContext(value="foo.bar", path="foo.bar.id").

    This is disabled by default; it can be turned on by
    settings the `auto_id_field` global to a value other
    than `None`.
    """

    def __init__(self, datum, id_field=None):
        """
        Invariant is that datum.path is the path from context to datum. The auto id
        will either be the id in the datum (if present) or the id of the context
        followed by the path to the datum.

        The path to this datum is always the path to the context, the path to the
        datum, and then the auto id field.
        """
        self.datum = datum
        self.id_field = id_field or auto_id_field

    @property
    def value(self):
        return str(self.datum.id_pseudopath)

    @property
    def path(self):
        return self.id_field

    @property
    def context(self):
        return self.datum

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.datum)

    def in_context(self, context, path):
        return AutoIdForDatum(self.datum.in_context(context=context, path=path))

    def __eq__(self, other):
        return isinstance(other, AutoIdForDatum) and other.datum == self.datum and self.id_field == other.id_field


class Root(JSONPath):
    """
    The JSONPath referring to the "root" object. Concrete syntax is '$'.
    The root is the topmost datum without any context attached.
    """

    def find(self, data):
        if not isinstance(data, DatumInContext):
            return [DatumInContext(data, path=Root(), context=None)]
        else:
            if data.context is None:
                return [DatumInContext(data.value, context=None, path=Root())]
            else:
                return Root().find(data.context)

    def update(self, data, val):
        return val

    def __str__(self):
        return '$'

    def __repr__(self):
        return 'Root()'

    def __eq__(self, other):
        return isinstance(other, Root)

class This(JSONPath):
    """
    The JSONPath referring to the current datum. Concrete syntax is '@'.
    """

    def find(self, datum):
        return [DatumInContext.wrap(datum)]

    def update(self, data, val):
        return val

    def __str__(self):
        return '`this`'

    def __repr__(self):
        return 'This()'

    def __eq__(self, other):
        return isinstance(other, This)

class Child(JSONPath):
    """
    JSONPath that first matches the left, then the right.
    Concrete syntax is <left> '.' <right>
    """

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def find(self, datum):
        """
        Extra special case: auto ids do not have children,
        so cut it off right now rather than auto id the auto id
        """

        return [submatch
                for subdata in self.left.find(datum)
                if not isinstance(subdata, AutoIdForDatum)
                for submatch in self.right.find(subdata)]

    def __eq__(self, other):
        return isinstance(other, Child) and self.left == other.left and self.right == other.right

    def __str__(self):
        return '%s.%s' % (self.left, self.right)

    def __repr__(self):
        return '%s(%r, %r)' % (self.__class__.__name__, self.left, self.right)

class Parent(JSONPath):
    """
    JSONPath that matches the parent node of the current match.
    Will crash if no such parent exists.
    Available via named operator `parent`.
    """

    def find(self, datum):
        datum = DatumInContext.wrap(datum)
        return [datum.context]

    def __eq__(self, other):
        return isinstance(other, Parent)

    def __str__(self):
        return '`parent`'

    def __repr__(self):
        return 'Parent()'


class Where(JSONPath):
    """
    JSONPath that first matches the left, and then
    filters for only those nodes that have
    a match on the right.

    WARNING: Subject to change. May want to have "contains"
    or some other better word for it.
    """

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def find(self, data):
        return [subdata for subdata in self.left.find(data) if self.right.find(data)]

    def __str__(self):
        return '%s where %s' % (self.left, self.right)

    def __eq__(self, other):
        return isinstance(other, Where) and other.left == self.left and other.right == self.right

class Descendants(JSONPath):
    """
    JSONPath that matches first the left expression then any descendant
    of it which matches the right expression.
    """

    def __init__(self, left, right):
        self.left = left
        self.right = right

    def find(self, datum):
        # <left> .. <right> ==> <left> . (<right> | *..<right> | [*]..<right>)
        #
        # With with a wonky caveat that since Slice() has funky coercions
        # we cannot just delegate to that equivalence or we'll hit an
        # infinite loop. So right here we implement the coercion-free version.

        # Get all left matches into a list
        left_matches = self.left.find(datum)
        if not isinstance(left_matches, list):
            left_matches = [left_matches]

        def match_recursively(datum):
            right_matches = self.right.find(datum)

            # Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern
            if isinstance(datum.value, list):
                recursive_matches = [submatch
                                     for i in range(0, len(datum.value))
                                     for submatch in match_recursively(DatumInContext(datum.value[i], context=datum, path=Index(i)))]

            elif isinstance(datum.value, dict):
                recursive_matches = [submatch
                                     for field in datum.value.keys()
                                     for submatch in match_recursively(DatumInContext(datum.value[field], context=datum, path=Fields(field)))]

            else:
                recursive_matches = []

            return right_matches + list(recursive_matches)

        # TODO: repeatable iterator instead of list?
        return [submatch
                for left_match in left_matches
                for submatch in match_recursively(left_match)]

    def is_singular():
        return False

    def __str__(self):
        return '%s..%s' % (self.left, self.right)

    def __eq__(self, other):
        return isinstance(other, Descendants) and self.left == other.left and self.right == other.right

class Union(JSONPath):
    """
    JSONPath that returns the union of the results of each match.
    This is pretty shoddily implemented for now. The nicest semantics
    in case of mismatched bits (list vs atomic) is to put
    them all in a list, but I haven't done that yet.

    WARNING: Any appearance of this being the _concatenation_ is
    coincidence. It may even be a bug! (or laziness)
    """
    def __init__(self, left, right):
        self.left = left
        self.right = right

    def is_singular(self):
        return False

    def find(self, data):
        return self.left.find(data) + self.right.find(data)

class Intersect(JSONPath):
    """
    JSONPath for bits that match *both* patterns.

    This can be accomplished a couple of ways. The most
    efficient is to actually build the intersected
    AST as in building a state machine for matching the
    intersection of regular languages. The next
    idea is to build a filtered data and match against
    that.
    """
    def __init__(self, left, right):
        self.left = left
        self.right = right

    def is_singular(self):
        return False

    def find(self, data):
        raise NotImplementedError()

class Fields(JSONPath):
    """
    JSONPath referring to some field of the current object.
    Concrete syntax is comma-separated field names.

    WARNING: If '*' is any of the field names, then they will
    all be returned.
    """

    def __init__(self, *fields):
        self.fields = fields

    def get_field_datum(self, datum, field):
        if field == auto_id_field:
            return AutoIdForDatum(datum)
        else:
            try:
                field_value = datum.value[field] # Do NOT use `val.get(field)` since that confuses None as a value and None due to `get`
                return DatumInContext(value=field_value, path=Fields(field), context=datum)
            except (TypeError, KeyError, AttributeError):
                return None

    def reified_fields(self, datum):
        if '*' not in self.fields:
            return self.fields
        else:
            try:
                fields = tuple(datum.value.keys())
                return fields if auto_id_field is None else fields + (auto_id_field,)
            except AttributeError:
                return ()

    def find(self, datum):
        datum = DatumInContext.wrap(datum)

        return [field_datum
                for field_datum in [self.get_field_datum(datum, field) for field in self.reified_fields(datum)]
                if field_datum is not None]

    def __str__(self):
        return ','.join(self.fields)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, ','.join(map(repr, self.fields)))

    def __eq__(self, other):
        return isinstance(other, Fields) and tuple(self.fields) == tuple(other.fields)


class Index(JSONPath):
    """
    JSONPath that matches indices of the current datum, or none if not large enough.
    Concrete syntax is brackets.

    WARNING: If the datum is not long enough, it will not crash but will not match anything.
    NOTE: For the concrete syntax of `[*]`, the abstract syntax is a Slice() with no parameters (equiv to `[:]`)
    """

    def __init__(self, index):
        self.index = index

    def find(self, datum):
        datum = DatumInContext.wrap(datum)

        if len(datum.value) > self.index:
            return [DatumInContext(datum.value[self.index], path=self, context=datum)]
        else:
            return []

    def __eq__(self, other):
        return isinstance(other, Index) and self.index == other.index

    def __str__(self):
        return '[%i]' % self.index

class Slice(JSONPath):
    """
    JSONPath matching a slice of an array.

    Because of a mismatch between JSON and XML when schema-unaware,
    this always returns an iterable; if the incoming data
    was not a list, then it returns a one element list _containing_ that
    data.

    Consider these two docs, and their schema-unaware translation to JSON:

    <a><b>hello</b></a> ==> {"a": {"b": "hello"}}
    <a><b>hello</b><b>goodbye</b></a> ==> {"a": {"b": ["hello", "goodbye"]}}

    If there were a schema, it would be known that "b" should always be an
    array (unless the schema were wonky, but that is too much to fix here)
    so when querying with JSON if the one writing the JSON knows that it
    should be an array, they can write a slice operator and it will coerce
    a non-array value to an array.

    This may be a bit unfortunate because it would be nice to always have
    an iterator, but dictionaries and other objects may also be iterable,
    so this is the compromise.
    """
    def __init__(self, start=None, end=None, step=None):
        self.start = start
        self.end = end
        self.step = step

    def find(self, datum):
        datum = DatumInContext.wrap(datum)

        # Here's the hack. If it is a dictionary or some kind of constant,
        # put it in a single-element list
        if (isinstance(datum.value, dict) or isinstance(datum.value, six.integer_types) or isinstance(datum.value, six.string_types)):
            return self.find(DatumInContext([datum.value], path=datum.path, context=datum.context))

        # Some iterators do not support slicing but we can still
        # at least work for '*'
        if self.start == None and self.end == None and self.step == None:
            return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in xrange(0, len(datum.value))]
        else:
            return [DatumInContext(datum.value[i], path=Index(i), context=datum) for i in range(0, len(datum.value))[self.start:self.end:self.step]]

    def __str__(self):
        if self.start == None and self.end == None and self.step == None:
            return '[*]'
        else:
            return '[%s%s%s]' % (self.start or '',
                                 ':%d'%self.end if self.end else '',
                                 ':%d'%self.step if self.step else '')

    def __repr__(self):
        return '%s(start=%r,end=%r,step=%r)' % (self.__class__.__name__, self.start, self.end, self.step)

    def __eq__(self, other):
        return isinstance(other, Slice) and other.start == self.start and self.end == other.end and other.step == self.step
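A short sketch of how these AST classes compose when built by hand (normally parser.py, below, produces them from concrete syntax):

    from jsonpath_rw.jsonpath import Child, Fields

    expr = Child(Fields('ledger'), Fields('hash'))
    matches = expr.find({'ledger': {'hash': 'ABC123'}})
    print([m.value for m in matches])           # ['ABC123']
    print([str(m.full_path) for m in matches])  # ['ledger.hash']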
bin/jsonpath_rw/lexer.py (new file, 171 lines)
@@ -0,0 +1,171 @@
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
import sys
import logging

import ply.lex

logger = logging.getLogger(__name__)

class JsonPathLexerError(Exception):
    pass

class JsonPathLexer(object):
    '''
    A Lexical analyzer for JsonPath.
    '''

    def __init__(self, debug=False):
        self.debug = debug
        if self.__doc__ == None:
            raise JsonPathLexerError('Docstrings have been removed! By design of PLY, jsonpath-rw requires docstrings. You must not use PYTHONOPTIMIZE=2 or python -OO.')

    def tokenize(self, string):
        '''
        Maps a string to an iterator over tokens. In other words: [char] -> [token]
        '''

        new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
        new_lexer.latest_newline = 0
        new_lexer.string_value = None
        new_lexer.input(string)

        while True:
            t = new_lexer.token()
            if t is None: break
            t.col = t.lexpos - new_lexer.latest_newline
            yield t

        if new_lexer.string_value is not None:
            raise JsonPathLexerError('Unexpected EOF in string literal or identifier')

    # ============== PLY Lexer specification ==================
    #
    # This probably should be private but:
    #   - the parser requires access to `tokens` (perhaps they should be defined in a third, shared dependency)
    #   - things like `literals` might be a legitimate part of the public interface.
    #
    # Anyhow, it is pythonic to give some rope to hang oneself with :-)

    literals = ['*', '.', '[', ']', '(', ')', '$', ',', ':', '|', '&']

    reserved_words = { 'where': 'WHERE' }

    tokens = ['DOUBLEDOT', 'NUMBER', 'ID', 'NAMED_OPERATOR'] + list(reserved_words.values())

    states = [ ('singlequote', 'exclusive'),
               ('doublequote', 'exclusive'),
               ('backquote', 'exclusive') ]

    # Normal lexing, rather easy
    t_DOUBLEDOT = r'\.\.'
    t_ignore = ' \t'

    def t_ID(self, t):
        r'[a-zA-Z_@][a-zA-Z0-9_@\-]*'
        t.type = self.reserved_words.get(t.value, 'ID')
        return t

    def t_NUMBER(self, t):
        r'-?\d+'
        t.value = int(t.value)
        return t


    # Single-quoted strings
    t_singlequote_ignore = ''
    def t_singlequote(self, t):
        r"'"
        t.lexer.string_start = t.lexer.lexpos
        t.lexer.string_value = ''
        t.lexer.push_state('singlequote')

    def t_singlequote_content(self, t):
        r"[^'\\]+"
        t.lexer.string_value += t.value

    def t_singlequote_escape(self, t):
        r'\\.'
        t.lexer.string_value += t.value[1]

    def t_singlequote_end(self, t):
        r"'"
        t.value = t.lexer.string_value
        t.type = 'ID'
        t.lexer.string_value = None
        t.lexer.pop_state()
        return t

    def t_singlequote_error(self, t):
        raise JsonPathLexerError('Error on line %s, col %s while lexing singlequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))


    # Double-quoted strings
    t_doublequote_ignore = ''
    def t_doublequote(self, t):
        r'"'
        t.lexer.string_start = t.lexer.lexpos
        t.lexer.string_value = ''
        t.lexer.push_state('doublequote')

    def t_doublequote_content(self, t):
        r'[^"\\]+'
        t.lexer.string_value += t.value

    def t_doublequote_escape(self, t):
        r'\\.'
        t.lexer.string_value += t.value[1]

    def t_doublequote_end(self, t):
        r'"'
        t.value = t.lexer.string_value
        t.type = 'ID'
        t.lexer.string_value = None
        t.lexer.pop_state()
        return t

    def t_doublequote_error(self, t):
        raise JsonPathLexerError('Error on line %s, col %s while lexing doublequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))


    # Back-quoted "magic" operators
    t_backquote_ignore = ''
    def t_backquote(self, t):
        r'`'
        t.lexer.string_start = t.lexer.lexpos
        t.lexer.string_value = ''
        t.lexer.push_state('backquote')

    def t_backquote_escape(self, t):
        r'\\.'
        t.lexer.string_value += t.value[1]

    def t_backquote_content(self, t):
        r"[^`\\]+"
        t.lexer.string_value += t.value

    def t_backquote_end(self, t):
        r'`'
        t.value = t.lexer.string_value
        t.type = 'NAMED_OPERATOR'
        t.lexer.string_value = None
        t.lexer.pop_state()
        return t

    def t_backquote_error(self, t):
        raise JsonPathLexerError('Error on line %s, col %s while lexing backquoted operator: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))


    # Counting lines, handling errors
    def t_newline(self, t):
        r'\n'
        t.lexer.lineno += 1
        t.lexer.latest_newline = t.lexpos

    def t_error(self, t):
        raise JsonPathLexerError('Error on line %s, col %s: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))

if __name__ == '__main__':
    logging.basicConfig()
    lexer = JsonPathLexer(debug=True)
    for token in lexer.tokenize(sys.stdin.read()):
        print('%-20s%s' % (token.value, token.type))
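A quick sketch of the lexer in isolation, doing for a literal string what the __main__ block above does for stdin:

    from jsonpath_rw.lexer import JsonPathLexer

    for tok in JsonPathLexer().tokenize('foo.bar[0]'):
        print(tok.type, tok.value)
    # expected token stream: ID, '.', ID, '[', NUMBER, ']'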
bin/jsonpath_rw/parser.py (new file, 187 lines)
@@ -0,0 +1,187 @@
from __future__ import print_function, absolute_import, division, generators, nested_scopes
import sys
import os.path
import logging

import ply.yacc

from jsonpath_rw.jsonpath import *
from jsonpath_rw.lexer import JsonPathLexer

logger = logging.getLogger(__name__)

def parse(string):
    return JsonPathParser().parse(string)

class JsonPathParser(object):
    '''
    An LALR-parser for JsonPath
    '''

    tokens = JsonPathLexer.tokens

    def __init__(self, debug=False, lexer_class=None):
        if self.__doc__ == None:
            raise Exception('Docstrings have been removed! By design of PLY, jsonpath-rw requires docstrings. You must not use PYTHONOPTIMIZE=2 or python -OO.')

        self.debug = debug
        self.lexer_class = lexer_class or JsonPathLexer # Crufty but works around statefulness in PLY

    def parse(self, string, lexer = None):
        lexer = lexer or self.lexer_class()
        return self.parse_token_stream(lexer.tokenize(string))

    def parse_token_stream(self, token_iterator, start_symbol='jsonpath'):

        # Since PLY has some crufty aspects and dumps files, we try to keep them local
        # However, we need to derive the name of the output Python file :-/
        output_directory = os.path.dirname(__file__)
        try:
            module_name = os.path.splitext(os.path.split(__file__)[1])[0]
        except:
            module_name = __name__

        parsing_table_module = '_'.join([module_name, start_symbol, 'parsetab'])

        # And we regenerate the parse table every time; it doesn't actually take that long!
        new_parser = ply.yacc.yacc(module=self,
                                   debug=self.debug,
                                   tabmodule = parsing_table_module,
                                   outputdir = output_directory,
                                   write_tables=0,
                                   start = start_symbol,
                                   errorlog = logger)

        return new_parser.parse(lexer = IteratorToTokenStream(token_iterator))

    # ===================== PLY Parser specification =====================

    precedence = [
        ('left', ','),
        ('left', 'DOUBLEDOT'),
        ('left', '.'),
        ('left', '|'),
        ('left', '&'),
        ('left', 'WHERE'),
    ]

    def p_error(self, t):
        raise Exception('Parse error at %s:%s near token %s (%s)' % (t.lineno, t.col, t.value, t.type))

    def p_jsonpath_binop(self, p):
        """jsonpath : jsonpath '.' jsonpath
                    | jsonpath DOUBLEDOT jsonpath
                    | jsonpath WHERE jsonpath
                    | jsonpath '|' jsonpath
                    | jsonpath '&' jsonpath"""
        op = p[2]

        if op == '.':
            p[0] = Child(p[1], p[3])
        elif op == '..':
            p[0] = Descendants(p[1], p[3])
        elif op == 'where':
            p[0] = Where(p[1], p[3])
        elif op == '|':
            p[0] = Union(p[1], p[3])
        elif op == '&':
            p[0] = Intersect(p[1], p[3])

    def p_jsonpath_fields(self, p):
        "jsonpath : fields_or_any"
        p[0] = Fields(*p[1])

    def p_jsonpath_named_operator(self, p):
        "jsonpath : NAMED_OPERATOR"
        if p[1] == 'this':
            p[0] = This()
        elif p[1] == 'parent':
            p[0] = Parent()
        else:
            raise Exception('Unknown named operator `%s` at %s:%s' % (p[1], p.lineno(1), p.lexpos(1)))

    def p_jsonpath_root(self, p):
        "jsonpath : '$'"
        p[0] = Root()

    def p_jsonpath_idx(self, p):
        "jsonpath : '[' idx ']'"
        p[0] = p[2]

    def p_jsonpath_slice(self, p):
        "jsonpath : '[' slice ']'"
        p[0] = p[2]

    def p_jsonpath_fieldbrackets(self, p):
        "jsonpath : '[' fields ']'"
        p[0] = Fields(*p[2])

    def p_jsonpath_child_fieldbrackets(self, p):
        "jsonpath : jsonpath '[' fields ']'"
        p[0] = Child(p[1], Fields(*p[3]))

    def p_jsonpath_child_idxbrackets(self, p):
        "jsonpath : jsonpath '[' idx ']'"
        p[0] = Child(p[1], p[3])

    def p_jsonpath_child_slicebrackets(self, p):
        "jsonpath : jsonpath '[' slice ']'"
        p[0] = Child(p[1], p[3])

    def p_jsonpath_parens(self, p):
        "jsonpath : '(' jsonpath ')'"
        p[0] = p[2]

    # Because fields in brackets cannot be '*' - that is reserved for array indices
    def p_fields_or_any(self, p):
        """fields_or_any : fields
                         | '*' """
        if p[1] == '*':
            p[0] = ['*']
        else:
            p[0] = p[1]

    def p_fields_id(self, p):
        "fields : ID"
        p[0] = [p[1]]

    def p_fields_comma(self, p):
        "fields : fields ',' fields"
        p[0] = p[1] + p[3]

    def p_idx(self, p):
        "idx : NUMBER"
        p[0] = Index(p[1])

    def p_slice_any(self, p):
        "slice : '*'"
        p[0] = Slice()

    def p_slice(self, p): # Currently does not support `step`
        "slice : maybe_int ':' maybe_int"
        p[0] = Slice(start=p[1], end=p[3])

    def p_maybe_int(self, p):
        """maybe_int : NUMBER
                     | empty"""
        p[0] = p[1]

    def p_empty(self, p):
        'empty :'
        p[0] = None

class IteratorToTokenStream(object):
    def __init__(self, iterator):
        self.iterator = iterator

    def token(self):
        try:
            return next(self.iterator)
        except StopIteration:
            return None


if __name__ == '__main__':
    logging.basicConfig()
    parser = JsonPathParser(debug=True)
    print(parser.parse(sys.stdin.read()))
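The standard jsonpath-rw entry point, re-exported by bin/jsonpath_rw/__init__.py above, ties the lexer and parser together:

    from jsonpath_rw import parse

    expr = parse('foo[*].baz')
    print([match.value for match in expr.find({'foo': [{'baz': 1}, {'baz': 2}]})])
    # [1, 2]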
bin/ply/__init__.py (new file, 4 lines)
@@ -0,0 +1,4 @@
# PLY package
# Author: David Beazley (dave@dabeaz.com)

__all__ = ['lex','yacc']
898
bin/ply/cpp.py
Normal file
898
bin/ply/cpp.py
Normal file
@@ -0,0 +1,898 @@
|
||||
# -----------------------------------------------------------------------------
|
||||
# cpp.py
|
||||
#
|
||||
# Author: David Beazley (http://www.dabeaz.com)
|
||||
# Copyright (C) 2007
|
||||
# All rights reserved
|
||||
#
|
||||
# This module implements an ANSI-C style lexical preprocessor for PLY.
|
||||
# -----------------------------------------------------------------------------
|
||||
from __future__ import generators
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Default preprocessor lexer definitions. These tokens are enough to get
|
||||
# a basic preprocessor working. Other modules may import these if they want
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
tokens = (
|
||||
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT', 'CPP_POUND','CPP_DPOUND'
|
||||
)
|
||||
|
||||
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
|
||||
|
||||
# Whitespace
|
||||
def t_CPP_WS(t):
|
||||
r'\s+'
|
||||
t.lexer.lineno += t.value.count("\n")
|
||||
return t
|
||||
|
||||
t_CPP_POUND = r'\#'
|
||||
t_CPP_DPOUND = r'\#\#'
|
||||
|
||||
# Identifier
|
||||
t_CPP_ID = r'[A-Za-z_][\w_]*'
|
||||
|
||||
# Integer literal
|
||||
def CPP_INTEGER(t):
|
||||
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU]|[lL]|[uU][lL]|[lL][uU])?)'
|
||||
return t
|
||||
|
||||
t_CPP_INTEGER = CPP_INTEGER
|
||||
|
||||
# Floating literal
|
||||
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
|
||||
|
||||
# String literal
|
||||
def t_CPP_STRING(t):
|
||||
r'\"([^\\\n]|(\\(.|\n)))*?\"'
|
||||
t.lexer.lineno += t.value.count("\n")
|
||||
return t
|
||||
|
||||
# Character constant 'c' or L'c'
|
||||
def t_CPP_CHAR(t):
|
||||
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
|
||||
t.lexer.lineno += t.value.count("\n")
|
||||
return t
|
||||
|
||||
# Comment
|
||||
def t_CPP_COMMENT(t):
|
||||
r'(/\*(.|\n)*?\*/)|(//.*?\n)'
|
||||
t.lexer.lineno += t.value.count("\n")
|
||||
return t
|
||||
|
||||
def t_error(t):
|
||||
t.type = t.value[0]
|
||||
t.value = t.value[0]
|
||||
t.lexer.skip(1)
|
||||
return t
|
||||
|
||||
import re
|
||||
import copy
|
||||
import time
|
||||
import os.path
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# trigraph()
|
||||
#
|
||||
# Given an input string, this function replaces all trigraph sequences.
|
||||
# The following mapping is used:
|
||||
#
|
||||
# ??= #
|
||||
# ??/ \
|
||||
# ??' ^
|
||||
# ??( [
|
||||
# ??) ]
|
||||
# ??! |
|
||||
# ??< {
|
||||
# ??> }
|
||||
# ??- ~
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
|
||||
_trigraph_rep = {
|
||||
'=':'#',
|
||||
'/':'\\',
|
||||
"'":'^',
|
||||
'(':'[',
|
||||
')':']',
|
||||
'!':'|',
|
||||
'<':'{',
|
||||
'>':'}',
|
||||
'-':'~'
|
||||
}
|
||||
|
||||
def trigraph(input):
|
||||
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Macro object
|
||||
#
|
||||
# This object holds information about preprocessor macros
|
||||
#
|
||||
# .name - Macro name (string)
|
||||
# .value - Macro value (a list of tokens)
|
||||
# .arglist - List of argument names
|
||||
# .variadic - Boolean indicating whether or not variadic macro
|
||||
# .vararg - Name of the variadic parameter
|
||||
#
|
||||
# When a macro is created, the macro replacement token sequence is
|
||||
# pre-scanned and used to create patch lists that are later used
|
||||
# during macro expansion
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
class Macro(object):
|
||||
def __init__(self,name,value,arglist=None,variadic=False):
|
||||
self.name = name
|
||||
self.value = value
|
||||
self.arglist = arglist
|
||||
self.variadic = variadic
|
||||
if variadic:
|
||||
self.vararg = arglist[-1]
|
||||
self.source = None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Preprocessor object
|
||||
#
|
||||
# Object representing a preprocessor. Contains macro definitions,
|
||||
# include directories, and other information
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
class Preprocessor(object):
|
||||
def __init__(self,lexer=None):
|
||||
if lexer is None:
|
||||
lexer = lex.lexer
|
||||
self.lexer = lexer
|
||||
self.macros = { }
|
||||
self.path = []
|
||||
self.temp_path = []
|
||||
|
||||
# Probe the lexer for selected tokens
|
||||
self.lexprobe()
|
||||
|
||||
tm = time.localtime()
|
||||
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
|
||||
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
|
||||
self.parser = None
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# tokenize()
|
||||
#
|
||||
# Utility function. Given a string of text, tokenize into a list of tokens
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
def tokenize(self,text):
|
||||
tokens = []
|
||||
self.lexer.input(text)
|
||||
while True:
|
||||
tok = self.lexer.token()
|
||||
if not tok: break
|
||||
tokens.append(tok)
|
||||
return tokens
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# error()
|
||||
#
|
||||
# Report a preprocessor error/warning of some kind
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
def error(self,file,line,msg):
|
||||
print("%s:%d %s" % (file,line,msg))
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# lexprobe()
|
||||
#
|
||||
# This method probes the preprocessor lexer object to discover
|
||||
# the token types of symbols that are important to the preprocessor.
|
||||
# If this works right, the preprocessor will simply "work"
|
||||
# with any suitable lexer regardless of how tokens have been named.
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
def lexprobe(self):
|
||||
|
||||
# Determine the token type for identifiers
|
||||
self.lexer.input("identifier")
|
||||
tok = self.lexer.token()
|
||||
if not tok or tok.value != "identifier":
|
||||
print("Couldn't determine identifier type")
|
||||
else:
|
||||
self.t_ID = tok.type
|
||||
|
||||
# Determine the token type for integers
|
||||
self.lexer.input("12345")
|
||||
tok = self.lexer.token()
|
||||
if not tok or int(tok.value) != 12345:
|
||||
print("Couldn't determine integer type")
|
||||
else:
|
||||
self.t_INTEGER = tok.type
|
||||
self.t_INTEGER_TYPE = type(tok.value)
|
||||
|
||||
# Determine the token type for strings enclosed in double quotes
|
||||
self.lexer.input("\"filename\"")
|
||||
tok = self.lexer.token()
|
||||
if not tok or tok.value != "\"filename\"":
|
||||
print("Couldn't determine string type")
|
||||
else:
|
||||
self.t_STRING = tok.type
|
||||
|
||||
# Determine the token type for whitespace--if any
|
||||
self.lexer.input(" ")
|
||||
tok = self.lexer.token()
|
||||
if not tok or tok.value != " ":
|
||||
self.t_SPACE = None
|
||||
else:
|
||||
self.t_SPACE = tok.type
|
||||
|
||||
# Determine the token type for newlines
|
||||
self.lexer.input("\n")
|
||||
tok = self.lexer.token()
|
||||
if not tok or tok.value != "\n":
|
||||
self.t_NEWLINE = None
|
||||
print("Couldn't determine token for newlines")
|
||||
else:
|
||||
self.t_NEWLINE = tok.type
|
||||
|
||||
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
|
||||
|
||||
# Check for other characters used by the preprocessor
|
||||
chars = [ '<','>','#','##','\\','(',')',',','.']
|
||||
for c in chars:
|
||||
self.lexer.input(c)
|
||||
tok = self.lexer.token()
|
||||
if not tok or tok.value != c:
|
||||
print("Unable to lex '%s' required for preprocessor" % c)
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# add_path()
|
||||
#
|
||||
# Adds a search path to the preprocessor.
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
def add_path(self,path):
|
||||
self.path.append(path)
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# group_lines()
|
||||
#
|
||||
# Given an input string, this function splits it into lines. Trailing whitespace
|
||||
# is removed. Any line ending with \ is grouped with the next line. This
|
||||
# function forms the lowest level of the preprocessor---grouping into text into
|
||||
# a line-by-line format.
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
def group_lines(self,input):
|
||||
lex = self.lexer.clone()
|
||||
lines = [x.rstrip() for x in input.splitlines()]
|
||||
for i in xrange(len(lines)):
|
||||
j = i+1
|
||||
while lines[i].endswith('\\') and (j < len(lines)):
|
||||
lines[i] = lines[i][:-1]+lines[j]
|
||||
lines[j] = ""
|
||||
j += 1
|
||||
|
||||
input = "\n".join(lines)
|
||||
lex.input(input)
|
||||
lex.lineno = 1
|
||||
|
||||
current_line = []
|
||||
while True:
|
||||
tok = lex.token()
|
||||
if not tok:
|
||||
break
|
||||
current_line.append(tok)
|
||||
if tok.type in self.t_WS and '\n' in tok.value:
|
||||
yield current_line
|
||||
current_line = []
|
||||
|
||||
if current_line:
|
||||
yield current_line
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# tokenstrip()
|
||||
#
|
||||
# Remove leading/trailing whitespace tokens from a token list
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
def tokenstrip(self,tokens):
|
||||
i = 0
|
||||
while i < len(tokens) and tokens[i].type in self.t_WS:
|
||||
i += 1
|
||||
del tokens[:i]
|
||||
i = len(tokens)-1
|
||||
while i >= 0 and tokens[i].type in self.t_WS:
|
||||
i -= 1
|
||||
del tokens[i+1:]
|
||||
return tokens
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# collect_args()
|
||||
#
|
||||
# Collects comma separated arguments from a list of tokens. The arguments
|
||||
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
|
||||
# where tokencount is the number of tokens consumed, args is a list of arguments,
|
||||
# and positions is a list of integers containing the starting index of each
|
||||
# argument. Each argument is represented by a list of tokens.
|
||||
#
|
||||
# When collecting arguments, leading and trailing whitespace is removed
|
||||
# from each argument.
|
||||
#
|
||||
# This function properly handles nested parenthesis and commas---these do not
|
||||
# define new arguments.
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
def collect_args(self,tokenlist):
|
||||
args = []
|
||||
positions = []
|
||||
current_arg = []
|
||||
nesting = 1
|
||||
tokenlen = len(tokenlist)
|
||||
|
||||
# Search for the opening '('.
|
||||
i = 0
|
||||
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
|
||||
i += 1
|
||||
|
||||
if (i < tokenlen) and (tokenlist[i].value == '('):
|
||||
positions.append(i+1)
|
||||
else:
|
||||
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
|
||||
return 0, [], []
|
||||
|
||||
i += 1
|
||||
|
||||
while i < tokenlen:
|
||||
t = tokenlist[i]
|
||||
if t.value == '(':
|
||||
current_arg.append(t)
|
||||
nesting += 1
|
||||
elif t.value == ')':
|
||||
nesting -= 1
|
||||
if nesting == 0:
|
||||
if current_arg:
|
||||
args.append(self.tokenstrip(current_arg))
|
||||
positions.append(i)
|
||||
return i+1,args,positions
|
||||
current_arg.append(t)
|
||||
elif t.value == ',' and nesting == 1:
|
||||
args.append(self.tokenstrip(current_arg))
|
||||
positions.append(i+1)
|
||||
current_arg = []
|
||||
else:
|
||||
current_arg.append(t)
|
||||
i += 1
|
||||
|
||||
# Missing end argument
|
||||
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
|
||||
return 0, [],[]
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# macro_prescan()
|
||||
#
|
||||
# Examine the macro value (token sequence) and identify patch points
|
||||
# This is used to speed up macro expansion later on---we'll know
|
||||
# right away where to apply patches to the value to form the expansion
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
def macro_prescan(self,macro):
|
||||
macro.patch = [] # Standard macro arguments
|
||||
macro.str_patch = [] # String conversion expansion
|
||||
macro.var_comma_patch = [] # Variadic macro comma patch
|
||||
i = 0
|
||||
while i < len(macro.value):
|
||||
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
|
||||
argnum = macro.arglist.index(macro.value[i].value)
|
||||
# Conversion of argument to a string
|
||||
if i > 0 and macro.value[i-1].value == '#':
|
||||
macro.value[i] = copy.copy(macro.value[i])
|
||||
macro.value[i].type = self.t_STRING
|
||||
del macro.value[i-1]
|
||||
macro.str_patch.append((argnum,i-1))
|
||||
continue
|
||||
# Concatenation
|
||||
elif (i > 0 and macro.value[i-1].value == '##'):
|
||||
macro.patch.append(('c',argnum,i-1))
|
||||
del macro.value[i-1]
|
||||
continue
|
||||
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
|
||||
macro.patch.append(('c',argnum,i))
|
||||
i += 1
|
||||
continue
|
||||
# Standard expansion
|
||||
else:
|
||||
macro.patch.append(('e',argnum,i))
|
||||
elif macro.value[i].value == '##':
|
||||
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
|
||||
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
|
||||
(macro.value[i+1].value == macro.vararg):
|
||||
macro.var_comma_patch.append(i-1)
|
||||
i += 1
|
||||
macro.patch.sort(key=lambda x: x[2],reverse=True)

    # ----------------------------------------------------------------------
    # macro_expand_args()
    #
    # Given a Macro and a list of arguments (each a token list), this method
    # returns an expanded version of the macro.  The return value is a token
    # sequence representing the replacement macro tokens.
    # ----------------------------------------------------------------------

    def macro_expand_args(self,macro,args):
        # Make a copy of the macro token sequence
        rep = [copy.copy(_x) for _x in macro.value]

        # Make string expansion patches.  These do not alter the length of the replacement sequence
        str_expansion = {}
        for argnum, i in macro.str_patch:
            if argnum not in str_expansion:
                str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
            rep[i] = copy.copy(rep[i])
            rep[i].value = str_expansion[argnum]

        # Make the variadic macro comma patch.  If the variadic macro argument
        # is empty, we get rid of the comma before it in the expansion.
        comma_patch = False
        if macro.variadic and not args[-1]:
            for i in macro.var_comma_patch:
                rep[i] = None
                comma_patch = True

        # Make all other patches.  The order of these matters.  It is assumed that the patch list
        # has been sorted in reverse order of patch location since replacements will cause the
        # size of the replacement sequence to expand from the patch point.
        expanded = { }
        for ptype, argnum, i in macro.patch:
            # Concatenation.  Argument is left unexpanded
            if ptype == 'c':
                rep[i:i+1] = args[argnum]
            # Normal expansion.  Argument is macro expanded first
            elif ptype == 'e':
                if argnum not in expanded:
                    expanded[argnum] = self.expand_macros(args[argnum])
                rep[i:i+1] = expanded[argnum]

        # Get rid of removed comma if necessary
        if comma_patch:
            rep = [_i for _i in rep if _i]

        return rep
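
    # Illustrative sketch: for
    #     #define MIN(a,b) ((a) < (b) ? (a) : (b))
    # a call MIN(x, y+1) reaches this method with args holding the token
    # lists for x and y+1; each 'e' patch splices the macro-expanded argument
    # into the copied body, yielding tokens for ((x) < (y+1) ? (x) : (y+1)).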

    # ----------------------------------------------------------------------
    # expand_macros()
    #
    # Given a list of tokens, this function performs macro expansion.
    # The expanded argument is a dictionary that contains macros already
    # expanded.  This is used to prevent infinite recursion.
    # ----------------------------------------------------------------------

    def expand_macros(self,tokens,expanded=None):
        if expanded is None:
            expanded = {}
        i = 0
        while i < len(tokens):
            t = tokens[i]
            if t.type == self.t_ID:
                if t.value in self.macros and t.value not in expanded:
                    # Yes, we found a macro match
                    expanded[t.value] = True

                    m = self.macros[t.value]
                    if not m.arglist:
                        # A simple macro
                        ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
                        for e in ex:
                            e.lineno = t.lineno
                        tokens[i:i+1] = ex
                        i += len(ex)
                    else:
                        # A macro with arguments
                        j = i + 1
                        while j < len(tokens) and tokens[j].type in self.t_WS:
                            j += 1
                        if tokens[j].value == '(':
                            tokcount,args,positions = self.collect_args(tokens[j:])
                            if not m.variadic and len(args) != len(m.arglist):
                                self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
                                i = j + tokcount
                            elif m.variadic and len(args) < len(m.arglist)-1:
                                if len(m.arglist) > 2:
                                    self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
                                else:
                                    self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
                                i = j + tokcount
                            else:
                                if m.variadic:
                                    if len(args) == len(m.arglist)-1:
                                        args.append([])
                                    else:
                                        args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
                                        del args[len(m.arglist):]

                                # Get macro replacement text
                                rep = self.macro_expand_args(m,args)
                                rep = self.expand_macros(rep,expanded)
                                for r in rep:
                                    r.lineno = t.lineno
                                tokens[i:j+tokcount] = rep
                                i += len(rep)
                    del expanded[t.value]
                    continue
                elif t.value == '__LINE__':
                    t.type = self.t_INTEGER
                    t.value = self.t_INTEGER_TYPE(t.lineno)

            i += 1
        return tokens
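
    # Illustrative sketch:
    #     p.define('SQUARE(x) ((x)*(x))')
    #     toks = p.tokenize('SQUARE(n + 1)')
    #     p.expand_macros(toks)   # -> tokens spelling ((n + 1)*(n + 1))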

    # ----------------------------------------------------------------------
    # evalexpr()
    #
    # Evaluate an expression token sequence for the purposes of evaluating
    # integral expressions.
    # ----------------------------------------------------------------------

    def evalexpr(self,tokens):
        # tokens = tokenize(line)
        # Search for defined macros
        i = 0
        while i < len(tokens):
            if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
                j = i + 1
                needparen = False
                result = "0L"
                while j < len(tokens):
                    if tokens[j].type in self.t_WS:
                        j += 1
                        continue
                    elif tokens[j].type == self.t_ID:
                        if tokens[j].value in self.macros:
                            result = "1L"
                        else:
                            result = "0L"
                        if not needparen: break
                    elif tokens[j].value == '(':
                        needparen = True
                    elif tokens[j].value == ')':
                        break
                    else:
                        self.error(self.source,tokens[i].lineno,"Malformed defined()")
                    j += 1
                tokens[i].type = self.t_INTEGER
                tokens[i].value = self.t_INTEGER_TYPE(result)
                del tokens[i+1:j+1]
            i += 1
        tokens = self.expand_macros(tokens)
        for i,t in enumerate(tokens):
            if t.type == self.t_ID:
                tokens[i] = copy.copy(t)
                tokens[i].type = self.t_INTEGER
                tokens[i].value = self.t_INTEGER_TYPE("0L")
            elif t.type == self.t_INTEGER:
                tokens[i] = copy.copy(t)
                # Strip off any trailing suffixes
                tokens[i].value = str(tokens[i].value)
                while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
                    tokens[i].value = tokens[i].value[:-1]

        expr = "".join([str(x.value) for x in tokens])
        expr = expr.replace("&&"," and ")
        expr = expr.replace("||"," or ")
        # Guard '!=' before rewriting '!' so it is not turned into 'not ='.
        expr = expr.replace("!="," __NE__ ")
        expr = expr.replace("!"," not ")
        expr = expr.replace("__NE__","!=")
        try:
            result = eval(expr)
        except StandardError:
            self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
            result = 0
        return result
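
    # Illustrative sketch: evalexpr drives #if/#elif handling.  For
    #     #if defined(FOO) && BAR > 2
    # the token sequence is rewritten into the Python expression
    # "1 and 3 > 2" (assuming FOO is defined and BAR expands to 3),
    # which eval() reduces to a truth value.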

    # ----------------------------------------------------------------------
    # parsegen()
    #
    # Parse an input string.
    # ----------------------------------------------------------------------
    def parsegen(self,input,source=None):

        # Replace trigraph sequences
        t = trigraph(input)
        lines = self.group_lines(t)

        if not source:
            source = ""

        self.define("__FILE__ \"%s\"" % source)

        self.source = source
        chunk = []
        enable = True
        iftrigger = False
        ifstack = []

        for x in lines:
            for i,tok in enumerate(x):
                if tok.type not in self.t_WS: break
            if tok.value == '#':
                # Preprocessor directive

                for tok in x:
                    if tok.type in self.t_WS and '\n' in tok.value:
                        chunk.append(tok)

                dirtokens = self.tokenstrip(x[i+1:])
                if dirtokens:
                    name = dirtokens[0].value
                    args = self.tokenstrip(dirtokens[1:])
                else:
                    name = ""
                    args = []

                if name == 'define':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.define(args)
                elif name == 'include':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        oldfile = self.macros['__FILE__']
                        for tok in self.include(args):
                            yield tok
                        self.macros['__FILE__'] = oldfile
                        self.source = source
                elif name == 'undef':
                    if enable:
                        for tok in self.expand_macros(chunk):
                            yield tok
                        chunk = []
                        self.undef(args)
                elif name == 'ifdef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if not args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'ifndef':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        if args[0].value in self.macros:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'if':
                    ifstack.append((enable,iftrigger))
                    if enable:
                        result = self.evalexpr(args)
                        if not result:
                            enable = False
                            iftrigger = False
                        else:
                            iftrigger = True
                elif name == 'elif':
                    if ifstack:
                        if ifstack[-1][0]:        # We only pay attention if outer "if" allows this
                            if enable:            # If already true, we flip enable False
                                enable = False
                            elif not iftrigger:   # If False, but not triggered yet, we'll check expression
                                result = self.evalexpr(args)
                                if result:
                                    enable = True
                                    iftrigger = True
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")

                elif name == 'else':
                    if ifstack:
                        if ifstack[-1][0]:
                            if enable:
                                enable = False
                            elif not iftrigger:
                                enable = True
                                iftrigger = True
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #else")

                elif name == 'endif':
                    if ifstack:
                        enable,iftrigger = ifstack.pop()
                    else:
                        self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
                else:
                    # Unknown preprocessor directive
                    pass

            else:
                # Normal text
                if enable:
                    chunk.extend(x)

        for tok in self.expand_macros(chunk):
            yield tok
        chunk = []

    # ----------------------------------------------------------------------
    # include()
    #
    # Implementation of file-inclusion
    # ----------------------------------------------------------------------

    def include(self,tokens):
        # Try to extract the filename and then process an include file
        if not tokens:
            return
        if tokens:
            if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
                tokens = self.expand_macros(tokens)

            if tokens[0].value == '<':
                # Include <...>
                i = 1
                while i < len(tokens):
                    if tokens[i].value == '>':
                        break
                    i += 1
                else:
                    print("Malformed #include <...>")
                    return
                filename = "".join([x.value for x in tokens[1:i]])
                path = self.path + [""] + self.temp_path
            elif tokens[0].type == self.t_STRING:
                filename = tokens[0].value[1:-1]
                path = self.temp_path + [""] + self.path
            else:
                print("Malformed #include statement")
                return
        for p in path:
            iname = os.path.join(p,filename)
            try:
                data = open(iname,"r").read()
                dname = os.path.dirname(iname)
                if dname:
                    self.temp_path.insert(0,dname)
                for tok in self.parsegen(data,filename):
                    yield tok
                if dname:
                    del self.temp_path[0]
                break
            except IOError:
                pass
        else:
            print("Couldn't find '%s'" % filename)

    # ----------------------------------------------------------------------
    # define()
    #
    # Define a new macro
    # ----------------------------------------------------------------------

    def define(self,tokens):
        if isinstance(tokens,(str,unicode)):
            tokens = self.tokenize(tokens)

        linetok = tokens
        try:
            name = linetok[0]
            if len(linetok) > 1:
                mtype = linetok[1]
            else:
                mtype = None
            if not mtype:
                m = Macro(name.value,[])
                self.macros[name.value] = m
            elif mtype.type in self.t_WS:
                # A normal macro
                m = Macro(name.value,self.tokenstrip(linetok[2:]))
                self.macros[name.value] = m
            elif mtype.value == '(':
                # A macro with arguments
                tokcount, args, positions = self.collect_args(linetok[1:])
                variadic = False
                for a in args:
                    if variadic:
                        print("No more arguments may follow a variadic argument")
                        break
                    astr = "".join([str(_i.value) for _i in a])
                    if astr == "...":
                        variadic = True
                        a[0].type = self.t_ID
                        a[0].value = '__VA_ARGS__'
                        del a[1:]
                        continue
                    elif astr[-3:] == "..." and a[0].type == self.t_ID:
                        variadic = True
                        del a[1:]
                        # If, for some reason, "." is part of the identifier, strip
                        # off the name for the purposes of macro expansion
                        if a[0].value[-3:] == '...':
                            a[0].value = a[0].value[:-3]
                        continue
                    if len(a) > 1 or a[0].type != self.t_ID:
                        print("Invalid macro argument")
                        break
                else:
                    mvalue = self.tokenstrip(linetok[1+tokcount:])
                    i = 0
                    while i < len(mvalue):
                        if i+1 < len(mvalue):
                            if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
                                del mvalue[i]
                                continue
                            elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
                                del mvalue[i+1]
                        i += 1
                    m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
                    self.macro_prescan(m)
                    self.macros[name.value] = m
            else:
                print("Bad macro definition")
        except LookupError:
            print("Bad macro definition")

    # ----------------------------------------------------------------------
    # undef()
    #
    # Undefine a macro
    # ----------------------------------------------------------------------

    def undef(self,tokens):
        id = tokens[0].value
        try:
            del self.macros[id]
        except LookupError:
            pass

    # ----------------------------------------------------------------------
    # parse()
    #
    # Parse input text.
    # ----------------------------------------------------------------------
    def parse(self,input,source=None,ignore={}):
        self.ignore = ignore
        self.parser = self.parsegen(input,source)

    # ----------------------------------------------------------------------
    # token()
    #
    # Method to return individual tokens
    # ----------------------------------------------------------------------
    def token(self):
        try:
            while True:
                tok = next(self.parser)
                if tok.type not in self.ignore: return tok
        except StopIteration:
            self.parser = None
        return None

if __name__ == '__main__':
    import ply.lex as lex
    lexer = lex.lex()

    # Run a preprocessor
    import sys
    f = open(sys.argv[1])
    input = f.read()

    p = Preprocessor(lexer)
    p.parse(input,sys.argv[1])
    while True:
        tok = p.token()
        if not tok: break
        print(p.source, tok)
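
    # Illustrative invocation: running this module directly preprocesses a
    # C file and prints the resulting token stream, e.g.
    #     python cpp.py input.c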
133
bin/ply/ctokens.py
Normal file
@@ -0,0 +1,133 @@
# ----------------------------------------------------------------------
# ctokens.py
#
# Token specifications for symbols in ANSI C and C++.  This file is
# meant to be used as a library in other tokenizers.
# ----------------------------------------------------------------------

# Reserved words

tokens = [
    # Literals (identifier, integer constant, float constant, string constant, char const)
    'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',

    # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
    'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
    'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
    'LOR', 'LAND', 'LNOT',
    'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',

    # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
    'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
    'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',

    # Increment/decrement (++,--)
    'INCREMENT', 'DECREMENT',

    # Structure dereference (->)
    'ARROW',

    # Ternary operator (?)
    'TERNARY',

    # Delimiters ( ) [ ] { } , . ; :
    'LPAREN', 'RPAREN',
    'LBRACKET', 'RBRACKET',
    'LBRACE', 'RBRACE',
    'COMMA', 'PERIOD', 'SEMI', 'COLON',

    # Ellipsis (...)
    'ELLIPSIS',
]

# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MODULO = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='

# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'\^='   # '^' must be escaped; a bare '^' anchors the pattern

# Increment/decrement
t_INCREMENT = r'\+\+'
t_DECREMENT = r'--'

# ->
t_ARROW = r'->'

# ?
t_TERNARY = r'\?'

# Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'

# Identifiers
t_ID = r'[A-Za-z_][A-Za-z0-9_]*'

# Integer literal
t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'

# Floating literal
t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'

# String literal
t_STRING = r'\"([^\\\n]|(\\.))*?\"'

# Character constant 'c' or L'c'
t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''

# Comment (C-Style)
def t_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    t.lexer.lineno += t.value.count('\n')
    return t

# Comment (C++-Style)
def t_CPPCOMMENT(t):
    r'//.*\n'
    t.lexer.lineno += 1
    return t
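
# Illustrative usage (a sketch; PLY will warn that no t_error rule is
# defined here):
#     import ply.lex as lex
#     import ctokens
#     lexer = lex.lex(module=ctokens)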
1058
bin/ply/lex.py
Normal file
File diff suppressed because it is too large
3276
bin/ply/yacc.py
Normal file
File diff suppressed because it is too large
0
bin/ripple/__init__.py
Normal file
152
bin/ripple/ledger/Args.py
Normal file
@@ -0,0 +1,152 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import argparse
import importlib
import os

from ripple.ledger import LedgerNumber
from ripple.util import File
from ripple.util.Function import Function
from ripple.util import Range

NAME = 'LedgerTool'
VERSION = '0.1'

_parser = argparse.ArgumentParser(
    prog=NAME,
    description='Retrieve and process Ripple ledgers.',
    epilog=LedgerNumber.HELP,
)

# Positional arguments.
_parser.add_argument(
    'command',
    nargs='*',
    help='Command to execute.'
)

# Flag arguments.
_parser.add_argument(
    '--cache',
    default='~/.local/share/ripple/ledger',
    help='The cache directory.',
)

_parser.add_argument(
    '--complete',
    action='store_true',
    help='If set, only match complete ledgers.',
)

_parser.add_argument(
    '--condition', '-c',
    default='all_ledgers',
    help='The name of a condition function used to match ledgers.',
)

_parser.add_argument(
    '--config',
    help='The rippled configuration file name.',
)

_parser.add_argument(
    '--display', '-d',
    default='ledger_number',
    help='Specify a function to display ledgers.',
)

_parser.add_argument(
    '--full', '-f',
    action='store_true',
    help='If true, request full ledgers.',
)

_parser.add_argument(
    '--indent', '-i',
    type=int,
    default=2,
    help='How many spaces to indent when displaying JSON.',
)

_parser.add_argument(
    '--binary',
    action='store_true',
    help='If true, searches are binary; by default, linear search is used.',
)

_parser.add_argument(
    '--position', '-p',
    choices=['all', 'first', 'last'],
    default='last',
    help='Select which ledgers to display.',
)

_parser.add_argument(
    '--rippled', '-r',
    help='The filename of a rippled binary for retrieving ledgers.',
)

_parser.add_argument(
    '--server', '-s',
    help='IP address of a rippled JSON server.',
)

_parser.add_argument(
    '--utc', '-u',
    action='store_true',
    help='If true, display times in UTC rather than local time.',
)

_parser.add_argument(
    '--version',
    action='version',
    version='%(prog)s ' + VERSION,
    help='Print the current version of %(prog)s',
)

_parser.add_argument(
    '--verbose', '-v',
    action='store_true',
    help='If true, give status messages on stderr.',
)

_parser.add_argument(
    '--window', '-w',
    type=int,
    default=0,
    help='How many ledgers to display around the matching ledger.',
)

_parser.add_argument(
    '--yes', '-y',
    action='store_true',
    help="If true, don't ask for confirmation on large commands.",
)

# Read the arguments from the command line.
ARGS = _parser.parse_args()

# Now remove any items that look like ledger numbers from the command line.
_command = ARGS.command
_parts = (ARGS.command, ARGS.ledgers) = ([], [])

for c in _command:
    _parts[Range.is_range(c, *LedgerNumber.LEDGERS)].append(c)

ARGS.command = ARGS.command or ['print' if ARGS.ledgers else 'info']

ARGS.cache = File.normalize(ARGS.cache)
ARGS.condition = Function(ARGS.condition, 'ripple.ledger.conditions')
ARGS.display = Function(ARGS.display, 'ripple.ledger.displays')

if ARGS.window < 0:
    raise ValueError('Window cannot be negative: --window=%d' %
                     ARGS.window)

_loaders = bool(ARGS.server) + bool(ARGS.rippled)

if not _loaders:
    ARGS.rippled = 'rippled'

elif _loaders > 1:
    raise ValueError('At most one of --rippled and --server may be specified')
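
# Example invocations (illustrative sketch; the rippled path is hypothetical):
#     LedgerTool.py info --rippled=build/rippled --verbose
#     LedgerTool.py print 8252899-8252905 --full
# Bare numbers, ranges, and special ledger names on the command line are
# routed into ARGS.ledgers; everything else is treated as the command.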
18
bin/ripple/ledger/LedgerNumber.py
Normal file
@@ -0,0 +1,18 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.util import Range

FIRST_EVER = 32570

LEDGERS = {
    'closed': 'the most recently closed ledger',
    'current': 'the current ledger',
    'first': 'the first complete ledger on this server',
    'last': 'the last complete ledger on this server',
    'validated': 'the most recently validated ledger',
}

HELP = """
Ledgers are represented either by a number, or by one of the special names:
""" + ',\n'.join('%s: %s' % (k, v) for k, v in sorted(LEDGERS.items()))
19
bin/ripple/ledger/Log.py
Normal file
@@ -0,0 +1,19 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import sys

from ripple.ledger.Args import ARGS

def out(*args, **kwds):
    # Pop the optional 'print' override so it isn't forwarded as a keyword,
    # and let callers supply their own file= without a duplicate argument.
    printer = kwds.pop('print', print)
    kwds.setdefault('file', sys.stdout)
    printer(*args, **kwds)

def info(*args, **kwds):
    if ARGS.verbose:
        out(*args, **kwds)

def error(*args, **kwds):
    out('ERROR', *args, **kwds)

def fatal(*args, **kwds):
    out('FATAL', *args, **kwds)
    raise Exception('FATAL: ' + ' '.join(str(a) for a in args))
21
bin/ripple/ledger/PrettyPrint.py
Normal file
@@ -0,0 +1,21 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.ledger.Args import ARGS

from functools import wraps
import json

def pretty_print(item):
    return json.dumps(item,
                      sort_keys=True,
                      indent=ARGS.indent,
                      separators=(',', ': '))

def pretty(f):
    """A decorator on a function that makes its results pretty."""
    @wraps(f)
    def wrapper(*args, **kwds):
        result = list(f(*args, **kwds))
        return pretty_print(result)

    return wrapper
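
# Illustrative sketch: wrap any generator to emit an indented JSON string.
#     @pretty
#     def numbers():
#         yield {'ledger_index': 1}
#     numbers()   # -> a JSON string, indented per --indent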
53
bin/ripple/ledger/RippledReader.py
Normal file
@@ -0,0 +1,53 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import os
import subprocess

from ripple.ledger.Args import ARGS
from ripple.util import File
from ripple.util import Range

_ERROR_CODE_REASON = {
    62: 'No rippled server is running.',
}

_DEFAULT_ERROR_ = "Couldn't connect to server."

class RippledReader(object):
    def __init__(self):
        fname = File.normalize(ARGS.rippled)
        if not os.path.exists(fname):
            raise Exception('No rippled found at %s.' % fname)
        self.cmd = [fname]
        if ARGS.config:
            self.cmd.extend(['--conf', File.normalize(ARGS.config)])
        self.info = self._command('server_info')['info']
        c = self.info.get('complete_ledgers')
        if c == 'empty':
            self.complete = []
        else:
            self.complete = sorted(Range.from_string(c))

    def name_to_ledger_index(self, ledger_name, is_full=False):
        return self.get_ledger(ledger_name, is_full)['ledger_index']

    def get_ledger(self, name, is_full=False):
        cmd = ['ledger', str(name)]
        if is_full:
            cmd.append('full')
        return self._command(*cmd)['ledger']

    def _command(self, *cmds):
        cmd = self.cmd + list(cmds)
        try:
            data = subprocess.check_output(cmd, stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as e:
            raise Exception(_ERROR_CODE_REASON.get(
                e.returncode, _DEFAULT_ERROR_))

        part = json.loads(data)
        try:
            return part['result']
        except KeyError:
            raise ValueError(part.get('error', 'unknown error'))
24
bin/ripple/ledger/SearchLedgers.py
Normal file
@@ -0,0 +1,24 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import sys

from ripple.ledger.Args import ARGS
from ripple.ledger import Log
from ripple.util import Range
from ripple.util import Search

def search(server):
    """Yields a stream of ledger numbers that match the given condition."""
    condition = lambda number: ARGS.condition(server, number)
    ledgers = server.ledgers
    if ARGS.binary:
        try:
            position = Search.FIRST if ARGS.position == 'first' else Search.LAST
            yield Search.binary_search(
                ledgers[0], ledgers[-1], condition, position)
        except ValueError:
            Log.fatal('No ledgers matching condition "%s".' % ARGS.condition,
                      file=sys.stderr)
    else:
        for x in Search.linear_search(ledgers, condition):
            yield x
43
bin/ripple/ledger/Server.py
Normal file
@@ -0,0 +1,43 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import os

from ripple.ledger import RippledReader, ServerReader
from ripple.ledger.Args import ARGS
from ripple.util.FileCache import file_cache
from ripple.util import Range

class Server(object):
    def __init__(self):
        if ARGS.rippled:
            reader = RippledReader.RippledReader()
        else:
            reader = ServerReader.ServerReader()

        self.reader = reader
        self.complete = reader.complete

        names = {
            'closed': reader.name_to_ledger_index('closed'),
            'current': reader.name_to_ledger_index('current'),
            'validated': reader.name_to_ledger_index('validated'),
            'first': self.complete[0],
            'last': self.complete[-1],
        }
        self.__dict__.update(names)
        self.ledgers = sorted(Range.join_ranges(*ARGS.ledgers, **names))

        def make_cache(is_full):
            name = 'full' if is_full else 'summary'
            filepath = os.path.join(ARGS.cache, name)
            creator = lambda n: reader.get_ledger(n, is_full)
            return file_cache(filepath, creator)
        self.caches = [make_cache(False), make_cache(True)]

    def info(self):
        return self.reader.info

    def get_ledger(self, number, is_full=False):
        return self.caches[is_full](number, int(number) in self.complete)
5
bin/ripple/ledger/ServerReader.py
Normal file
@@ -0,0 +1,5 @@
from __future__ import absolute_import, division, print_function, unicode_literals

class ServerReader(object):
    # Server.Server() constructs this with no arguments, so take none here.
    def __init__(self):
        raise ValueError('Direct server connections are not yet implemented.')
0
bin/ripple/ledger/__init__.py
Normal file
19
bin/ripple/ledger/commands/Info.py
Normal file
@@ -0,0 +1,19 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.ledger.Args import ARGS
from ripple.ledger import Log
from ripple.ledger.PrettyPrint import pretty_print
from ripple.util import Range

SAFE = True

HELP = 'info - return server_info'

def info(server):
    Log.out('closed =', server.closed)
    Log.out('current =', server.current)
    Log.out('validated =', server.validated)
    Log.out('complete =', Range.to_string(server.complete))

    if ARGS.full:
        Log.out(pretty_print(server.info()))
16
bin/ripple/ledger/commands/Print.py
Normal file
@@ -0,0 +1,16 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.ledger.Args import ARGS
from ripple.ledger import SearchLedgers

import json

SAFE = True

HELP = """print

Print the ledgers to stdout.  The default command."""

def run_print(server):
    for x in ARGS.display(server, SearchLedgers.search(server)):
        print(x)
0
bin/ripple/ledger/commands/__init__.py
Normal file
4
bin/ripple/ledger/conditions/__init__.py
Normal file
@@ -0,0 +1,4 @@
from __future__ import absolute_import, division, print_function, unicode_literals

def all_ledgers(server, ledger_number):
    return True
71
bin/ripple/ledger/displays/__init__.py
Normal file
@@ -0,0 +1,71 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from functools import wraps

import jsonpath_rw

from ripple.ledger.Args import ARGS
from ripple.ledger.PrettyPrint import pretty_print
from ripple.util.Decimal import Decimal
from ripple.util import Dict
from ripple.util import Range

def ledger_number(server, numbers):
    yield Range.to_string(numbers)

def display(f):
    """A decorator for displays that just print JSON."""
    @wraps(f)
    def wrapper(server, numbers, *args, **kwds):
        for number in numbers:
            ledger = server.get_ledger(number, ARGS.full)
            yield pretty_print(f(ledger, *args, **kwds))
    return wrapper

def json(f):
    """A decorator for displays that print JSON, extracted by a path."""
    @wraps(f)
    def wrapper(server, numbers, path, *args, **kwds):
        try:
            path_expr = jsonpath_rw.parse(path)
        except Exception:
            raise ValueError("Can't understand jsonpath '%s'." % path)

        for number in numbers:
            ledger = server.get_ledger(number, ARGS.full)
            finds = path_expr.find(ledger)
            yield pretty_print(f(finds, *args, **kwds))
    return wrapper

@display
def ledger(led):
    return led

@display
def prune(ledger, level=2):
    return Dict.prune(ledger, level, False)

TRANSACT_FIELDS = (
    'accepted',
    'close_time_human',
    'closed',
    'ledger_index',
    'total_coins',
    'transactions',
)

@display
def transact(ledger):
    return dict((f, ledger[f]) for f in TRANSACT_FIELDS)

@json
def extract(finds):
    return dict((str(f.full_path), str(f.value)) for f in finds)

@json
def sum(finds):
    d = Decimal()
    for f in finds:
        d.accumulate(f.value)
    return [str(d), len(finds)]
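
# Illustrative sketch: displays are chosen with --display, e.g.
#     --display=prune(3)
#     --display="extract('transactions[*].hash')"
# (the jsonpath string is hypothetical; any jsonpath_rw expression works,
# and the extra Function arguments become the display's path/parameters).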
40
bin/ripple/util/Cache.py
Normal file
@@ -0,0 +1,40 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from collections import defaultdict

class Cache(object):
    def __init__(self):
        self._value_to_index = {}
        self._index_to_value = []

    def value_to_index(self, value, **kwds):
        index = self._value_to_index.get(value, None)
        if index is None:
            index = len(self._index_to_value)
            self._index_to_value.append((value, kwds))
            self._value_to_index[value] = index
        return index

    def index_to_value(self, index):
        return self._index_to_value[index]

def NamedCache():
    return defaultdict(Cache)

def cache_by_key(d, keyfunc=None, exclude=None):
    # Note: visit() currently only walks the structure in place; the cache,
    # keyfunc and exclude hooks are not yet wired in.
    cache = defaultdict(Cache)
    exclude = exclude or None
    keyfunc = keyfunc or (lambda x: x)

    def visit(item):
        if isinstance(item, list):
            for i, x in enumerate(item):
                item[i] = visit(x)

        elif isinstance(item, dict):
            for k, v in item.items():
                item[k] = visit(v)

        return item

    return cache
77
bin/ripple/util/CommandList.py
Normal file
@@ -0,0 +1,77 @@
from __future__ import absolute_import, division, print_function, unicode_literals

# Code taken from github/rec/grit.

import os
import sys

from collections import namedtuple

from ripple.ledger.Args import ARGS
from ripple.util import Log

Command = namedtuple('Command', 'function help safe')

def make_command(module):
    name = module.__name__.split('.')[-1].lower()
    return name, Command(getattr(module, name, None) or
                         getattr(module, 'run_' + name),
                         getattr(module, 'HELP'),
                         getattr(module, 'SAFE', False))

class CommandList(object):
    def __init__(self, *args, **kwds):
        self.registry = {}
        self.register(*args, **kwds)

    def register(self, *modules, **kwds):
        for module in modules:
            name, command = make_command(module)
            self.registry[name] = command

        for k, v in kwds.items():
            if not isinstance(v, (list, tuple)):
                v = [v]
            self.register_one(k, *v)

    def keys(self):
        return self.registry.keys()

    def register_one(self, name, function, help='', safe=False):
        assert name not in self.registry
        self.registry[name] = Command(function, help, safe)

    def _get(self, command):
        command = command.lower()
        c = self.registry.get(command)
        if c:
            return command, c
        commands = [c for c in self.registry if c.startswith(command)]
        if len(commands) == 1:
            command = commands[0]
            return command, self.registry[command]
        if not commands:
            raise ValueError('No such command: %s.  Commands are %s.' %
                             (command, ', '.join(sorted(self.registry))))
        if len(commands) > 1:
            raise ValueError('Command %s was ambiguous: %s.' %
                             (command, ', '.join(commands)))

    def get(self, command):
        return self._get(command)[1]

    def run(self, command, *args):
        return self.get(command).function(*args)

    def run_safe(self, command, *args):
        name, cmd = self._get(command)
        if not (ARGS.yes or cmd.safe):
            confirm = raw_input('OK to execute "rl %s %s"? (y/N) ' %
                                (name, ' '.join(args)))
            if not confirm.lower().startswith('y'):
                Log.error('Cancelled.')
                return
        cmd.function(*args)

    def help(self, command):
        # Command.help is a string field, not a callable.
        return self.get(command).help
46
bin/ripple/util/Decimal.py
Normal file
@@ -0,0 +1,46 @@
from __future__ import absolute_import, division, print_function, unicode_literals

"""Fixed point numbers."""

POSITIONS = 10
POSITIONS_SHIFT = 10 ** POSITIONS

class Decimal(object):
    def __init__(self, desc='0'):
        if isinstance(desc, int):
            self.value = desc
            return
        if desc.startswith('-'):
            sign = -1
            desc = desc[1:]
        else:
            sign = 1
        parts = desc.split('.')
        if len(parts) == 1:
            parts.append('0')
        elif len(parts) > 2:
            raise Exception('Too many decimals in "%s"' % desc)
        number, decimal = parts
        # Fix the number of positions.
        decimal = (decimal + POSITIONS * '0')[:POSITIONS]
        self.value = sign * int(number + decimal)

    def accumulate(self, item):
        if not isinstance(item, Decimal):
            item = Decimal(item)
        self.value += item.value

    def __str__(self):
        if self.value >= 0:
            sign = ''
            value = self.value
        else:
            sign = '-'
            value = -self.value
        number = value // POSITIONS_SHIFT
        decimal = value % POSITIONS_SHIFT

        if decimal:
            # Zero-pad to POSITIONS digits so that leading zeros in the
            # fraction (e.g. 0.05) survive, then strip trailing zeros.
            frac = ('%0*d' % (POSITIONS, decimal)).rstrip('0')
            return '%s%s.%s' % (sign, number, frac)
        else:
            return '%s%s' % (sign, number)
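
# Illustrative sketch:
#     d = Decimal('3.14')
#     d.accumulate('-0.04')
#     str(d)   # -> '3.1'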
33
bin/ripple/util/Dict.py
Normal file
@@ -0,0 +1,33 @@
from __future__ import absolute_import, division, print_function, unicode_literals

def count_all_subitems(x):
    """Count the subitems of a Python object, including the object itself."""
    if isinstance(x, list):
        return 1 + sum(count_all_subitems(i) for i in x)
    if isinstance(x, dict):
        return 1 + sum(count_all_subitems(i) for i in x.itervalues())
    return 1

def prune(item, level, count_recursively=True):
    def subitems(x):
        i = count_all_subitems(x) - 1 if count_recursively else len(x)
        return '1 subitem' if i == 1 else '%d subitems' % i

    assert level >= 0
    if not item:
        return item

    if isinstance(item, list):
        if level:
            return [prune(i, level - 1, count_recursively) for i in item]
        else:
            return '[list with %s]' % subitems(item)

    if isinstance(item, dict):
        if level:
            return dict((k, prune(v, level - 1, count_recursively))
                        for k, v in item.iteritems())
        else:
            return '{dict with %s}' % subitems(item)

    return item
7
bin/ripple/util/File.py
Normal file
@@ -0,0 +1,7 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import os

def normalize(f):
    f = os.path.join(*f.split('/'))  # For Windows users.
    return os.path.abspath(os.path.expanduser(f))
33
bin/ripple/util/FileCache.py
Normal file
@@ -0,0 +1,33 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import json
import gzip
import os

_NONE = object()

def file_cache(filename_prefix, creator, open=gzip.open, suffix='.gz'):
    """A two-level cache, which stores expensive results in memory and on disk."""
    cached_data = {}
    if not os.path.exists(filename_prefix):
        os.makedirs(filename_prefix)

    def get_file_data(name):
        filename = os.path.join(filename_prefix, str(name)) + suffix
        if os.path.exists(filename):
            return json.load(open(filename))

        result = creator(name)
        json.dump(result, open(filename, 'w'))
        return result

    def get_data(name, use_file_cache=True):
        result = cached_data.get(name, _NONE)
        if result is _NONE:
            maker = get_file_data if use_file_cache else creator
            result = maker(name)
            cached_data[name] = result
        return result

    return get_data
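
# Illustrative sketch: cache an expensive computation as .gz files (the
# directory below is hypothetical).
#     get = file_cache('/tmp/squares', lambda n: int(n) ** 2)
#     get(12)   # computed, written to /tmp/squares/12.gz
#     get(12)   # served from the in-memory cache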
49
bin/ripple/util/Function.py
Normal file
@@ -0,0 +1,49 @@
from __future__ import absolute_import, division, print_function, unicode_literals

"""A function that can be specified at the command line, with an argument."""

import importlib
import re

MATCHER = re.compile(r'([\w.]+)(.*)')

def _split_function(desc):
    m = MATCHER.match(desc)
    if not m:
        raise ValueError('"%s" is not a function' % desc)
    name, args = (g.strip() for g in m.groups())
    args = eval(args or '()')  # Yes, really eval()!
    if not isinstance(args, tuple):
        args = (args,)
    return name, args

class Function(object):
    def __init__(self, desc='', default_path=''):
        self.desc = desc.strip()
        if not self.desc:
            # Make an empty function that does nothing.
            self.args = ()
            self.function = lambda *args, **kwds: None
            return

        self.function, self.args = _split_function(self.desc)
        if '.' not in self.function:
            if default_path and not default_path.endswith('.'):
                default_path += '.'
            self.function = default_path + self.function
        p, m = self.function.rsplit('.', 1)
        try:
            mod = importlib.import_module(p)
        except ImportError:
            raise ValueError('Can\'t find Python module "%s"' % p)

        try:
            self.function = getattr(mod, m)
        except AttributeError:
            raise ValueError('No function "%s" in module "%s"' % (m, p))

    def __str__(self):
        return self.desc

    def __call__(self, *args, **kwds):
        return self.function(*(args + self.args), **kwds)
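
# Illustrative sketch: how --condition and --display strings are resolved.
#     f = Function('prune(3)', 'ripple.ledger.displays')
#     # imports ripple.ledger.displays, binds prune, and appends (3,) to
#     # the positional arguments on every call.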
36
bin/ripple/util/Log.py
Normal file
@@ -0,0 +1,36 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import sys

def error(*args, **kwds):
    # CommandList.run_safe() calls Log.error(); this is a minimal shim.
    print('ERROR', *args, file=sys.stderr, **kwds)

def count_all_subitems(x):
    """Count the subitems of a Python object, including the object itself."""
    if isinstance(x, list):
        return 1 + sum(count_all_subitems(i) for i in x)
    if isinstance(x, dict):
        return 1 + sum(count_all_subitems(i) for i in x.values())
    return 1

def prune(item, level, count_recursively=True):
    def subitems(x):
        if count_recursively:
            i = count_all_subitems(x) - 1
        else:
            i = len(x)
        return '1 subitem' if (i == 1) else '%d subitems' % i

    assert level >= 0
    if not item:
        return item

    if isinstance(item, list):
        if level:
            return [prune(i, level - 1, count_recursively) for i in item]
        else:
            return '[list with %s]' % subitems(item)

    if isinstance(item, dict):
        if level:
            return dict((k, prune(v, level - 1, count_recursively))
                        for k, v in item.iteritems())
        else:
            return '{dict with %s}' % subitems(item)

    return item
53
bin/ripple/util/Range.py
Normal file
@@ -0,0 +1,53 @@
from __future__ import absolute_import, division, print_function, unicode_literals

"""
Convert a discontiguous range of integers to and from a human-friendly form.

Real world example is the server_info.complete_ledgers:
   8252899-8403772,8403824,8403827-8403830,8403834-8403876
"""

def from_string(desc, **aliases):
    if not desc:
        return []
    result = set()
    for d in desc.split(','):
        nums = [aliases.get(x, None) or int(x) for x in d.split('-')]
        if len(nums) == 1:
            result.add(nums[0])
        elif len(nums) == 2:
            result.update(range(nums[0], nums[1] + 1))
    return result

def to_string(r):
    groups = []
    next_group = []
    for i, x in enumerate(sorted(r)):
        if next_group and (x - next_group[-1]) > 1:
            groups.append(next_group)
            next_group = []
        next_group.append(x)
    if next_group:
        groups.append(next_group)

    def display(g):
        if len(g) == 1:
            return str(g[0])
        else:
            return '%s-%s' % (g[0], g[-1])

    return ','.join(display(g) for g in groups)

def is_range(desc, *names):
    try:
        from_string(desc, **dict((n, 1) for n in names))
        return True
    except ValueError:
        return False

def join_ranges(*ranges, **aliases):
    result = set()
    for r in ranges:
        result.update(from_string(r, **aliases))
    return result
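
# Illustrative round trip:
#     from_string('10,19,42-45')             # -> set([10, 19, 42, 43, 44, 45])
#     to_string([10, 19, 42, 43, 44, 45])    # -> '10,19,42-45'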
46
bin/ripple/util/Search.py
Normal file
@@ -0,0 +1,46 @@
from __future__ import absolute_import, division, print_function, unicode_literals

FIRST, LAST = range(2)

def binary_search(begin, end, condition, location=FIRST):
    """Search for an i in the interval [begin, end] where condition(i) is true.

    If location is FIRST, return the first such i.
    If location is LAST, return the last such i.
    If there is no such i, then throw an exception.
    """
    b = condition(begin)
    e = condition(end)
    if b and e:
        return begin if location == FIRST else end

    if not (b or e):
        raise ValueError('%d/%d' % (begin, end))

    if b and location is FIRST:
        return begin

    if e and location is LAST:
        return end

    width = end - begin + 1
    if width == 1:
        if not b:
            raise ValueError('%d/%d' % (begin, end))
        return begin
    if width == 2:
        return begin if b else end

    mid = (begin + end) // 2
    m = condition(mid)

    if m == b:
        return binary_search(mid, end, condition, location)
    else:
        return binary_search(begin, mid, condition, location)

def linear_search(items, condition):
    """Yields each i in items where condition(i) is true."""
    for i in items:
        if condition(i):
            yield i
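
# Illustrative sketch (binary_search assumes the matching region is
# contiguous and that at least one endpoint matches):
#     cond = lambda i: 10 <= i < 15
#     binary_search(0, 14, cond, FIRST)       # -> 10
#     list(linear_search(range(8, 13), cond)) # -> [10, 11, 12]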
21
bin/ripple/util/Time.py
Normal file
@@ -0,0 +1,21 @@
from __future__ import absolute_import, division, print_function, unicode_literals

import datetime

# Format for human-readable dates in rippled.
_DATE_FORMAT = '%Y-%b-%d'
_TIME_FORMAT = '%H:%M:%S'
_DATETIME_FORMAT = '%s %s' % (_DATE_FORMAT, _TIME_FORMAT)

_FORMATS = _DATE_FORMAT, _TIME_FORMAT, _DATETIME_FORMAT

def parse_datetime(desc):
    for fmt in _FORMATS:
        try:
            # datetime.date has no strptime; use datetime.datetime.
            return datetime.datetime.strptime(desc, fmt)
        except ValueError:
            pass
    raise ValueError("Can't understand date '%s'." % desc)

def format_datetime(dt):
    return dt.strftime(_DATETIME_FORMAT)
0
bin/ripple/util/__init__.py
Normal file
12
bin/ripple/util/test_Cache.py
Normal file
@@ -0,0 +1,12 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.util.Cache import NamedCache

from unittest import TestCase

class test_Cache(TestCase):
    def setUp(self):
        self.cache = NamedCache()

    def test_trivial(self):
        pass
20
bin/ripple/util/test_Decimal.py
Normal file
@@ -0,0 +1,20 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.util.Decimal import Decimal

from unittest import TestCase

class test_Decimal(TestCase):
    def test_construct(self):
        self.assertEquals(str(Decimal('')), '0')
        self.assertEquals(str(Decimal('0')), '0')
        self.assertEquals(str(Decimal('0.2')), '0.2')
        self.assertEquals(str(Decimal('-0.2')), '-0.2')
        self.assertEquals(str(Decimal('3.1416')), '3.1416')

    def test_accumulate(self):
        d = Decimal()
        d.accumulate('0.5')
        d.accumulate('3.1416')
        d.accumulate('-23.34234')
        self.assertEquals(str(d), '-19.70074')
56
bin/ripple/util/test_Dict.py
Normal file
@@ -0,0 +1,56 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.util import Dict

from unittest import TestCase

class test_Dict(TestCase):
    def test_count_all_subitems(self):
        self.assertEquals(Dict.count_all_subitems({}), 1)
        self.assertEquals(Dict.count_all_subitems({'a': {}}), 2)
        self.assertEquals(Dict.count_all_subitems([1]), 2)
        self.assertEquals(Dict.count_all_subitems([1, 2]), 3)
        self.assertEquals(Dict.count_all_subitems([1, {2: 3}]), 4)
        self.assertEquals(Dict.count_all_subitems([1, {2: [3]}]), 5)
        self.assertEquals(Dict.count_all_subitems([1, {2: [3, 4]}]), 6)

    def test_prune(self):
        self.assertEquals(Dict.prune({}, 0), {})
        self.assertEquals(Dict.prune({}, 1), {})

        self.assertEquals(Dict.prune({1: 2}, 0), '{dict with 1 subitem}')
        self.assertEquals(Dict.prune({1: 2}, 1), {1: 2})
        self.assertEquals(Dict.prune({1: 2}, 2), {1: 2})

        self.assertEquals(Dict.prune([1, 2, 3], 0), '[list with 3 subitems]')
        self.assertEquals(Dict.prune([1, 2, 3], 1), [1, 2, 3])

        self.assertEquals(Dict.prune([{1: [2, 3]}], 0),
                          '[list with 4 subitems]')
        self.assertEquals(Dict.prune([{1: [2, 3]}], 1),
                          ['{dict with 3 subitems}'])
        self.assertEquals(Dict.prune([{1: [2, 3]}], 2),
                          [{1: u'[list with 2 subitems]'}])
        self.assertEquals(Dict.prune([{1: [2, 3]}], 3),
                          [{1: [2, 3]}])

    def test_prune_nosub(self):
        self.assertEquals(Dict.prune({}, 0, False), {})
        self.assertEquals(Dict.prune({}, 1, False), {})

        self.assertEquals(Dict.prune({1: 2}, 0, False), '{dict with 1 subitem}')
        self.assertEquals(Dict.prune({1: 2}, 1, False), {1: 2})
        self.assertEquals(Dict.prune({1: 2}, 2, False), {1: 2})

        self.assertEquals(Dict.prune([1, 2, 3], 0, False),
                          '[list with 3 subitems]')
        self.assertEquals(Dict.prune([1, 2, 3], 1, False), [1, 2, 3])

        self.assertEquals(Dict.prune([{1: [2, 3]}], 0, False),
                          '[list with 1 subitem]')
        self.assertEquals(Dict.prune([{1: [2, 3]}], 1, False),
                          ['{dict with 1 subitem}'])
        self.assertEquals(Dict.prune([{1: [2, 3]}], 2, False),
                          [{1: u'[list with 2 subitems]'}])
        self.assertEquals(Dict.prune([{1: [2, 3]}], 3, False),
                          [{1: [2, 3]}])
25
bin/ripple/util/test_Function.py
Normal file
@@ -0,0 +1,25 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.util.Function import Function, MATCHER

from unittest import TestCase

def FN(*args, **kwds):
    return args, kwds

class test_Function(TestCase):
    def match_test(self, item, *results):
        self.assertEquals(MATCHER.match(item).groups(), results)

    def test_simple(self):
        self.match_test('function', 'function', '')
        self.match_test('f(x)', 'f', '(x)')

    def test_empty_function(self):
        self.assertEquals(Function()(), None)

    def test_function(self):
        f = Function('ripple.util.test_Function.FN(True, {1: 2}, None)')
        self.assertEquals(f(), ((True, {1: 2}, None), {}))
        self.assertEquals(f('hello', foo='bar'),
                          (('hello', True, {1: 2}, None), {'foo': 'bar'}))
28
bin/ripple/util/test_Range.py
Normal file
@@ -0,0 +1,28 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.util import Range

from unittest import TestCase

class test_Range(TestCase):
    def round_trip(self, s, *items):
        self.assertEquals(Range.from_string(s), set(items))
        self.assertEquals(Range.to_string(items), s)

    def test_complete(self):
        self.round_trip('10,19', 10, 19)
        self.round_trip('10', 10)
        self.round_trip('10-12', 10, 11, 12)
        self.round_trip('10,19,42-45', 10, 19, 42, 43, 44, 45)

    def test_names(self):
        self.assertEquals(
            Range.from_string('first,last,current', first=1, last=3, current=5),
            set([1, 3, 5]))

    def test_is_range(self):
        self.assertTrue(Range.is_range(''))
        self.assertTrue(Range.is_range('10'))
        self.assertTrue(Range.is_range('10,12'))
        self.assertFalse(Range.is_range('10,12,fred'))
        self.assertTrue(Range.is_range('10,12,fred', 'fred'))
44
bin/ripple/util/test_Search.py
Normal file
@@ -0,0 +1,44 @@
from __future__ import absolute_import, division, print_function, unicode_literals

from ripple.util.Search import binary_search, linear_search, FIRST, LAST

from unittest import TestCase

class test_Search(TestCase):
    def condition(self, i):
        return 10 <= i < 15

    def test_linear_full(self):
        self.assertEquals(list(linear_search(range(21), self.condition)),
                          [10, 11, 12, 13, 14])

    def test_linear_partial(self):
        self.assertEquals(list(linear_search(range(8, 14), self.condition)),
                          [10, 11, 12, 13])
        self.assertEquals(list(linear_search(range(11, 14), self.condition)),
                          [11, 12, 13])
        self.assertEquals(list(linear_search(range(12, 18), self.condition)),
                          [12, 13, 14])

    def test_linear_empty(self):
        self.assertEquals(list(linear_search(range(1, 4), self.condition)), [])

    def test_binary_first(self):
        self.assertEquals(binary_search(0, 14, self.condition, FIRST), 10)
        self.assertEquals(binary_search(10, 19, self.condition, FIRST), 10)
        self.assertEquals(binary_search(14, 14, self.condition, FIRST), 14)
        self.assertEquals(binary_search(14, 15, self.condition, FIRST), 14)
        self.assertEquals(binary_search(13, 15, self.condition, FIRST), 13)

    def test_binary_last(self):
        self.assertEquals(binary_search(10, 20, self.condition, LAST), 14)
        self.assertEquals(binary_search(0, 14, self.condition, LAST), 14)
        self.assertEquals(binary_search(14, 14, self.condition, LAST), 14)
        self.assertEquals(binary_search(14, 15, self.condition, LAST), 14)
        self.assertEquals(binary_search(13, 15, self.condition, LAST), 14)

    def test_binary_throws(self):
        self.assertRaises(
            ValueError, binary_search, 0, 20, self.condition, LAST)
        self.assertRaises(
            ValueError, binary_search, 0, 20, self.condition, FIRST)
747
bin/six.py
Normal file
@@ -0,0 +1,747 @@
|
||||
"""Utilities for writing code that runs on Python 2 and 3"""
|
||||
|
||||
# Copyright (c) 2010-2014 Benjamin Peterson
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
import functools
|
||||
import operator
|
||||
import sys
|
||||
import types
|
||||
|
||||
__author__ = "Benjamin Peterson <benjamin@python.org>"
|
||||
__version__ = "1.7.3"
|
||||
|
||||
|
||||
# Useful for very coarse version differentiation.
|
||||
PY2 = sys.version_info[0] == 2
|
||||
PY3 = sys.version_info[0] == 3
|
||||
|
||||
if PY3:
|
||||
string_types = str,
|
||||
integer_types = int,
|
||||
class_types = type,
|
||||
text_type = str
|
||||
binary_type = bytes
|
||||
|
||||
MAXSIZE = sys.maxsize
|
||||
else:
|
||||
string_types = basestring,
|
||||
integer_types = (int, long)
|
||||
class_types = (type, types.ClassType)
|
||||
text_type = unicode
|
||||
binary_type = str
|
||||
|
||||
if sys.platform.startswith("java"):
|
||||
# Jython always uses 32 bits.
|
||||
MAXSIZE = int((1 << 31) - 1)
|
||||
else:
|
||||
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
|
||||
class X(object):
|
||||
def __len__(self):
|
||||
return 1 << 31
|
||||
try:
|
||||
len(X())
|
||||
except OverflowError:
|
||||
# 32-bit
|
||||
MAXSIZE = int((1 << 31) - 1)
|
||||
else:
|
||||
# 64-bit
|
||||
MAXSIZE = int((1 << 63) - 1)
|
||||
del X
|
||||
|
||||
|
||||
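Editor's note, not part of the committed file: the tuples above exist so that callers can write one isinstance() check that covers both major versions. A minimal sketch, assuming only the six API defined in this file:

    import six

    def describe(value):
        # string_types is (str,) on Python 3 and (basestring,) on Python 2,
        # so a single check covers unicode and byte strings alike.
        if isinstance(value, six.string_types):
            return 'a string'
        if isinstance(value, six.integer_types):
            return 'an integer'
        return 'something else'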
def _add_doc(func, doc):
    """Add documentation to a function."""
    func.__doc__ = doc


def _import_module(name):
    """Import module, returning the module after the last dot."""
    __import__(name)
    return sys.modules[name]


class _LazyDescr(object):

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        # This is a bit ugly, but it avoids running this again.
        delattr(obj.__class__, self.name)
        return result


class MovedModule(_LazyDescr):

    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old

    def _resolve(self):
        return _import_module(self.mod)

    def __getattr__(self, attr):
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value


class _LazyModule(types.ModuleType):

    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        self.__doc__ = self.__class__.__doc__

    def __dir__(self):
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs

    # Subclasses should override this
    _moved_attributes = []


class MovedAttribute(_LazyDescr):

    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr

    def _resolve(self):
        module = _import_module(self.mod)
        return getattr(module, self.attr)


class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.

    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        self.name = six_module_name
        self.known_modules = {}

    def _add_module(self, mod, *fullnames):
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod

    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]

    def find_module(self, fullname, path=None):
        if fullname in self.known_modules:
            return self
        return None

    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)

    def load_module(self, fullname):
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod

    def is_package(self, fullname):
        """
        Return true, if the named module is a package.

        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")

    def get_code(self, fullname):
        """Return None

        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code

_importer = _SixMetaPathImporter(__name__)


class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""
    __path__ = []  # mark as package


_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),

    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
    MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr

_MovedItems._moved_attributes = _moved_attributes

moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
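Editor's note: the net effect of the machinery above is that renamed standard-library members import under one stable name. A small sketch using only names registered in the list above:

    from six.moves import range, zip

    # On Python 2 these resolve lazily to xrange and itertools.izip;
    # on Python 3, to the builtins of the same name.
    squares = list(zip(range(4), (i * i for i in range(4))))
    print(squares)  # [(0, 0), (1, 1), (2, 4), (3, 9)]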
class Module_six_moves_urllib_parse(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_parse"""


_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr

Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes

_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")


class Module_six_moves_urllib_error(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_error"""


_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr

Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes

_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")


class Module_six_moves_urllib_request(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_request"""


_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr

Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes

_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")


class Module_six_moves_urllib_response(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_response"""


_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr

Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes

_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")


class Module_six_moves_urllib_robotparser(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""


_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr

Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes

_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")


class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")

    def __dir__(self):
        return ['parse', 'error', 'request', 'response', 'robotparser']

_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
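Editor's note: once the five submodules are registered, Python 3 style urllib imports work on both versions. A minimal sketch:

    from six.moves.urllib.parse import urlparse, urlencode

    # Backed by urlparse/urllib on Python 2 and urllib.parse on Python 3.
    print(urlparse('https://ripple.com/path?q=1').netloc)
    print(urlencode({'ledger': 'validated'}))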
def add_move(move):
    """Add an item to six.moves."""
    setattr(_MovedItems, move.name, move)


def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
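Editor's note: a hedged sketch of add_move(); the shlex_quote registration below is a hypothetical example (this version of the file does not register it), built only from the MovedAttribute signature defined above:

    import six

    # Map Python 2's pipes.quote and Python 3's shlex.quote to one name.
    six.add_move(six.MovedAttribute('shlex_quote', 'pipes', 'shlex', 'quote'))
    from six.moves import shlex_quote
    print(shlex_quote('a b'))  # 'a b'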
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"

    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"

    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"


try:
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator


try:
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)


if PY3:
    def get_unbound_function(unbound):
        return unbound

    create_bound_method = types.MethodType

    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func

    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)

    class Iterator(object):

        def next(self):
            return type(self).__next__(self)

    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")


get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)


if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))

    def itervalues(d, **kw):
        return iter(d.values(**kw))

    def iteritems(d, **kw):
        return iter(d.items(**kw))

    def iterlists(d, **kw):
        return iter(d.lists(**kw))
else:
    def iterkeys(d, **kw):
        return iter(d.iterkeys(**kw))

    def itervalues(d, **kw):
        return iter(d.itervalues(**kw))

    def iteritems(d, **kw):
        return iter(d.iteritems(**kw))

    def iterlists(d, **kw):
        return iter(d.iterlists(**kw))


_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
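Editor's note: the iter* helpers let callers iterate dictionaries without touching the renamed Python 2 methods directly. A short sketch:

    import six

    totals = {'XRP': 1, 'USD': 2}
    # Wraps dict.iteritems() on Python 2 and dict.items() on Python 3,
    # returning an iterator in both cases.
    for currency, value in six.iteritems(totals):
        print(currency, value)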
if PY3:
    def b(s):
        return s.encode("latin-1")
    def u(s):
        return s
    unichr = chr
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    # Workaround for standalone backslash
    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    def iterbytes(buf):
        return (ord(byte) for byte in buf)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
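Editor's note: b() and u() stand in for whichever of the b'' and u'' literals the running interpreter lacks. A quick sketch of the byte helpers defined above:

    import six

    payload = six.b('ledger')   # bytes on both versions
    label = six.u('ledger')     # text on both versions
    # byte2int/indexbytes read individual byte values portably.
    print(six.byte2int(payload), six.indexbytes(payload, 1))  # 108 101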
if PY3:
    exec_ = getattr(moves.builtins, "exec")


    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")


    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
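Editor's note: reraise() exists because the three-argument raise statement is a syntax error on Python 3. A minimal usage sketch:

    import sys
    import six

    def fetch(d, key):
        try:
            return d[key]
        except KeyError:
            # Re-raise with the original traceback, portably across 2 and 3.
            six.reraise(*sys.exc_info())

    try:
        fetch({}, 'missing')
    except KeyError as e:
        print('re-raised:', repr(e))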
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                isinstance(data, unicode) and
                fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
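Editor's note: print_ is simply the print builtin where one exists; the fallback above reimplements its keyword interface for 2.4 and 2.5. A short sketch:

    import sys
    import six

    six.print_('ledger', 'closed', sep=': ', end='.\n', file=sys.stdout)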
_add_doc(reraise, """Reraise an exception.""")

if sys.version_info[0:2] < (3, 4):
    def wraps(wrapped):
        def wrapper(f):
            f = functools.wraps(wrapped)(f)
            f.__wrapped__ = wrapped
            return f
        return wrapper
else:
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(meta):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})
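Editor's note: with_metaclass() avoids choosing between the incompatible __metaclass__ attribute (Python 2) and metaclass= keyword (Python 3). A minimal sketch with a hypothetical metaclass:

    import six

    class Meta(type):
        def __new__(mcs, name, bases, d):
            d.setdefault('kind', name.lower())
            return super(Meta, mcs).__new__(mcs, name, bases, d)

    # Works under both metaclass spellings.
    class Ledger(six.with_metaclass(Meta, object)):
        pass

    print(Ledger.kind)  # 'ledger'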
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass."""
    def wrapper(cls):
        orig_vars = cls.__dict__.copy()
        orig_vars.pop('__dict__', None)
        orig_vars.pop('__weakref__', None)
        slots = orig_vars.get('__slots__')
        if slots is not None:
            if isinstance(slots, str):
                slots = [slots]
            for slots_var in slots:
                orig_vars.pop(slots_var)
        return metaclass(cls.__name__, cls.__bases__, orig_vars)
    return wrapper
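Editor's note: add_metaclass() is the decorator-style equivalent; it rebuilds the class through the given metaclass while dropping __dict__, __weakref__, and any __slots__ descriptors so they are not duplicated. A sketch with a hypothetical metaclass:

    import six

    class Meta(type):
        pass

    @six.add_metaclass(Meta)
    class Server(object):
        pass

    print(type(Server) is Meta)  # True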
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)