code stringlengths 3–1.05M | repo_name stringlengths 5–104 | path stringlengths 4–251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3–1.05M |
---|---|---|---|---|---|
"""
Monte Carlo Approximation of Pi
Statistical Version
3/28/15
Albert Liang
Uses a Monte Carlo method to approximate Pi. The user controls the accuracy by choosing the number of random samples.
This program builds on top of mcPi.py by running many trials and computing the standard deviation and error of the collected results. This should make it easier to test the strength of the program.
Just for lulz, the data from each trial won't be saved to a list to be processed. Instead, the data will be written to an external file. Another function will later read from that file and then run the statistics. Yes, it would be easier just to use a list, but where's the fun in that?
NOTES:
# All points on or contained within the border of the circle are considered "in the circle".
# 'center' is a list of size 2. Format is [x,y]. We assume that x and y are integers.
# We assume that 'side' and 'radius' are integers greater than 0.
# Learn to use exceptions to handle the above cases, instead of relying on if-statements upon input?
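# The estimator in brief: for a circle of radius r inscribed in a square of side 2r, area(circle)/area(square) = (pi*r**2)/(2*r)**2 = pi/4, so if 'hits' out of 'total' uniform random points land inside the circle, pi ~= 4.0*hits/total.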
"""
from math import *
from random import *
# Classes ======================================================================================================
class Square(object):
def __init__(self,side,center):
self.side = side
self.c_x = center[0]
self.c_y = center[1]
self.max_x = self.c_x + 0.5*side
self.min_x = self.c_x - 0.5*side
self.max_y = self.c_y + 0.5*side
self.min_y = self.c_y - 0.5*side
def getRandomPoint(self):
point = [0,0]
point[0] = randint(self.min_x,self.max_x)
point[1] = randint(self.min_y,self.max_y)
return point
class Circle(object):
def __init__(self,radius,center):
self.radius = radius
self.c_x = center[0]
self.c_y = center[1]
def pointIsInCircle(self,point):
d = getDistance([self.c_x,self.c_y],point)
return d <= self.radius
# Global Functions =============================================================================================
def getDistance(p1,p2):
x_diff = p2[0]-p1[0]
y_diff = p2[1]-p1[1]
a = pow(x_diff,2)
b = pow(y_diff,2)
d = pow( (a+b),0.5 )
return d
# Check if String Represents an Integer
# (From Stackoverflow)
def StringRepInt(s):
try:
int(s)
return True
except ValueError:
return False
def initFiles(numTrial,numIter):
with open('mcPi_Data.dat','w+') as w:
pass
with open('mcPi_Report.txt','w+') as w:
w.write('Report: Monte Carlo Approximation of Pi\n\n\n')
w.write('Number of Trials: ' + str(numTrial))
w.write('\nSamples per Trial: ' + str(numIter))
w.write('\n\nTrial\t\tApprox\t\t\t\tError\t\t\t\t\n')
w.write('=======\t\t======\t\t\t\t======\t\t\t\t\n')
def runTrial(numIter):
# Initialize Square and Circle
center = [0,0]
radius = 1000000000000
square = Square(2*radius,center)
circle = Circle(radius,center)
# Initialize Counts
count_cir = 0.00
count_total = 0.00
for i in range(numIter):
point = square.getRandomPoint()
count_total += 1
if( circle.pointIsInCircle(point) ):
count_cir += 1
approx = (count_cir/count_total) * 4
error = approx - pi
s_approx = str(approx)
s_error = str(error)
with open('mcPi_Data.dat','a') as w:
w.write( s_approx + '\n' )
with open('mcPi_Report.txt','a') as w:
w.write( s_approx + '\t\t\t\t' + s_error + '\t\t\t\t\n' )
#print s_approx
def runStatistics():
sum = 0.00
square_sum = 0.00
count = 0.00
with open('mcPi_Data.dat','r') as data:
while(True):
s = data.readline()
if( s == '' ):
break
count += 1
sum += float(s)
error = float(s) - pi
square_sum += pow(error,2)
variance = square_sum/count
std = pow(variance,0.5)
mean = sum/count
rel_error = std/mean
print 'Mean: ',mean
print 'STD: ',std
print 'RErr: ',rel_error
with open('mcPi_Report.txt','a') as w:
w.write( '\n\nResults\n' )
w.write( '================' )
w.write( '\nMean:\t\t' + str(mean) )
w.write( '\nStd Deviation:\t' + str(std) )
w.write( '\nRelative Err:\t' + str(rel_error) )
def getExpectedRelError(N):
expectedRE = pow( (4/pi - 1)/N ,0.5 )
print 'ExRE: ',expectedRE
with open('mcPi_Report.txt','a') as w:
w.write( '\nExpctd ReErr:\t' + str(expectedRE) )
w.write( '\n' )
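# Where the expected relative error above comes from (a sketch assuming
# independent uniform samples): each sample is a Bernoulli trial with success
# probability p = pi/4, so a single trial's estimate 4*(hits/N) has
#     mean     = 4*p            = pi
#     variance = 16*p*(1-p)/N   = pi*(4 - pi)/N
# and therefore
#     std/mean = sqrt(pi*(4 - pi)/N) / pi = sqrt((4/pi - 1)/N)
# which is exactly the expression computed in getExpectedRelError().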
# Main =========================================================================================================
def main():
print '\n\nMonte Carlo Approximation of Pi:'
print '=================================================='
while(True):
numIter = raw_input( '\nNumber of samples to draw ( \'q\' to quit): ' )
if( numIter.lower() == 'q' ):
break
elif( not( StringRepInt(numIter) ) or int(numIter) <= 0 ):
print 'Please enter an integer greater than 0.'
continue
numIter = int(numIter)
numTrial = raw_input( 'Number of Trials: ' )
if( not( StringRepInt(numTrial) ) or int(numTrial) <= 0 ):
print 'Please enter an integer greater than 0.'
continue
numTrial = int(numTrial)
initFiles(numTrial,numIter)
print( 'Initialized Readout Files...' )
print( 'Running Trials...' )
for i in range(numTrial):
with open('mcPi_Report.txt','a') as w:
w.write( str(i+1) + '\t\t' )
runTrial(numIter)
print( 'Crunching Stats...' )
runStatistics()
getExpectedRelError(numIter)
print '\nExiting...\n\n'
# Run ==========================================================================================================
main()
| albertliangcode/Pi_MonteCarloSim | s_mcPi.py | Python | mit | 5,504 |
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.3.3'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
'OrderedDict',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decimal import Decimal
from decoder import JSONDecoder, JSONDecodeError
from encoder import JSONEncoder
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import ordered_dict
return ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
try:
from simplejson._speedups import make_encoder
return make_encoder
except ImportError:
return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
**kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
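# A hedged usage sketch for dump() (illustrative only; the buffer and data
# are made up):
#
#     from StringIO import StringIO
#     buf = StringIO()
#     dump({'b': 1, 'a': [2, 3]}, buf, indent='  ', sort_keys=True)
#     text = buf.getvalue()          # pretty-printed JSON with sorted keys
#     loads(text) == {'a': [2, 3], 'b': 1}   # round-trips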
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True,
namedtuple_as_object=True,
tuple_as_array=True,
**kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
If *namedtuple_as_object* is true (default: ``True``),
:class:`tuple` subclasses with ``_asdict()`` methods will be encoded
as JSON objects.
If *tuple_as_array* is true (default: ``True``),
:class:`tuple` (and subclasses) will be encoded as JSON arrays.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and namedtuple_as_object and tuple_as_array and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal,
namedtuple_as_object=namedtuple_as_object,
tuple_as_array=tuple_as_array,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, namedtuple_as_object=True, tuple_as_array=True,
**kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
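# A hedged usage sketch for load() (illustrative only):
#
#     from StringIO import StringIO
#     load(StringIO('[1.1, 2.2]'), use_decimal=True)
#     # -> [Decimal('1.1'), Decimal('2.2')]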
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and not use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
if use_decimal:
if parse_float is not None:
raise TypeError("use_decimal=True implies parse_float=Decimal")
kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
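# A hedged usage sketch for object_pairs_hook (illustrative only): preserving
# the order of keys in a decoded JSON object by handing the decoded
# (key, value) pairs straight to OrderedDict.
#
#     doc = '{"first": 1, "second": 2, "third": 3}'
#     ordered = loads(doc, object_pairs_hook=OrderedDict)
#     list(ordered.keys())   # ['first', 'second', 'third'] (unicode under py2)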
def _toggle_speedups(enabled):
import decoder as dec
import encoder as enc
import scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
| mx3L/archivczsk | build/plugin/src/resources/libraries/simplejson/__init__.py | Python | gpl-2.0 | 18,584 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-17 11:53
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('learn', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='languagedictionnary',
old_name='name',
new_name='language',
),
]
| Aigrefin/py3learn | learn/migrations/0002_auto_20160517_1353.py | Python | mit | 427 |
# -*- coding: utf-8 -*-
#
# Clang.jl documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 27 21:11:35 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import juliadoc
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['juliadoc.julia', 'juliadoc.jlhelp']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clang.jl'
copyright = u'2013, Isaiah H. Norton'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [juliadoc.get_theme_dir()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = juliadoc.default_sidebars()
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clangjldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Clangjl.tex', u'Clang.jl Documentation',
u'Isaiah H. Norton', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'clangjl', u'Clang.jl Documentation',
[u'Isaiah H. Norton'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Clangjl', u'Clang.jl Documentation',
u'Isaiah H. Norton', 'Clangjl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mdcfrancis/Clang.jl | doc/conf.py | Python | mit | 7,840 |
_base_ = './rpn_r50_fpn_2x_coco.py'
model = dict(
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch',
init_cfg=dict(
type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
| open-mmlab/mmdetection | configs/rpn/rpn_x101_32x4d_fpn_2x_coco.py | Python | apache-2.0 | 413 |
from django.contrib.auth.models import User
from django.test import TestCase
import autofixture
autofixture.autodiscover()
class AutodiscoverTestCase(TestCase):
def test_builtin_fixtures(self):
from autofixture.autofixtures import UserFixture
self.assertEqual(autofixture.REGISTRY[User], UserFixture)
| gregmuellegger/django-autofixture | autofixture_tests/tests/test_autodiscover.py | Python | bsd-3-clause | 325 |
"""
Module simplifying manipulation of XML described at
http://libvirt.org/formatstorage.html#StorageVol
"""
from virttest.libvirt_xml import base, accessors
from virttest.libvirt_xml.xcepts import LibvirtXMLNotFoundError
class VolXMLBase(base.LibvirtXMLBase):
"""
Accessor methods for VolXML class.
Properties:
name: string, operates on XML name tag
key: string, operates on key tag
capacity: integer, operates on capacity attribute of capacity tag
allocation: integer, operates on allocation attribute of allocation tag
format: string, operates on type attribute of format tag
path: string, operates on path attribute of path tag
owner, integer, operates on owner attribute of owner tag
group, integer, operates on group attribute of group tag
mode: string, operates on mode attribute of mode tag
label: string, operates on label attribute of label tag
compat: string, operates on compat attribute of compat tag
lazy_refcounts: bool, True/False
encryption: VolXMLBase.Encryption instance.
capacity_unit: string, operates on unit attribute of capacity tag
"""
__slots__ = ('name', 'key', 'capacity', 'allocation', 'format', 'path',
'owner', 'group', 'mode', 'label', 'compat', 'lazy_refcounts',
'encryption', "capacity_unit")
__uncompareable__ = base.LibvirtXMLBase.__uncompareable__
__schema_name__ = "storagevol"
def __init__(self, virsh_instance=base.virsh):
accessors.XMLElementText('name', self, parent_xpath='/',
tag_name='name')
accessors.XMLElementText('key', self, parent_xpath='/',
tag_name='key')
accessors.XMLElementInt('capacity', self, parent_xpath='/',
tag_name='capacity')
accessors.XMLElementInt('allocation', self, parent_xpath='/',
tag_name='allocation')
accessors.XMLAttribute('format', self, parent_xpath='/target',
tag_name='format', attribute='type')
accessors.XMLAttribute('capacity_unit', self, parent_xpath='/',
tag_name='capacity', attribute='unit')
accessors.XMLElementNest('encryption', self, parent_xpath='/target',
tag_name='encryption', subclass=self.Encryption,
subclass_dargs={
'virsh_instance': virsh_instance})
accessors.XMLElementText('path', self, parent_xpath='/target',
tag_name='path')
accessors.XMLElementInt('owner', self,
parent_xpath='/target/permissions',
tag_name='owner')
accessors.XMLElementInt('group', self,
parent_xpath='/target/permissions',
tag_name='group')
accessors.XMLElementText('mode', self,
parent_xpath='/target/permissions',
tag_name='mode')
accessors.XMLElementText('label', self,
parent_xpath='/target/permissions',
tag_name='label')
accessors.XMLElementText('compat', self, parent_xpath='/target',
tag_name='compat')
accessors.XMLElementBool('lazy_refcounts', self,
parent_xpath='/target/features',
tag_name='lazy_refcounts')
super(VolXMLBase, self).__init__(virsh_instance=virsh_instance)
class VolXML(VolXMLBase):
"""
Manipulators of a Virtual Vol through its XML definition.
"""
__slots__ = []
def __init__(self, vol_name='default', virsh_instance=base.virsh):
"""
Initialize new instance with empty XML
"""
super(VolXML, self).__init__(virsh_instance=virsh_instance)
self.xml = u"<volume><name>%s</name></volume>" % vol_name
def new_encryption(self, **dargs):
"""
Return a new volume encryption instance and set properties from dargs
"""
new_one = self.Encryption(virsh_instance=self.virsh)
for key, value in list(dargs.items()):
setattr(new_one, key, value)
return new_one
def create(self, pool_name, virsh_instance=base.virsh):
"""
Create volume with virsh from this instance
"""
result = virsh_instance.vol_create(pool_name, self.xml)
if result.exit_status:
return False
return True
@staticmethod
def new_from_vol_dumpxml(vol_name, pool_name, virsh_instance=base.virsh):
"""
Return new VolXML instance from virsh vol-dumpxml command
:param vol_name: Name of vol to vol-dumpxml
:param virsh_instance: virsh module or instance to use
:return: New initialized VolXML instance
"""
volxml = VolXML(virsh_instance=virsh_instance)
result = virsh_instance.vol_dumpxml(vol_name, pool_name)
volxml['xml'] = result.stdout_text.strip()
return volxml
@staticmethod
def get_vol_details_by_name(vol_name, pool_name, virsh_instance=base.virsh):
"""
Return volume xml dictionary by Vol's uuid or name.
:param vol_name: Vol's name
:return: volume xml dictionary
"""
volume_xml = {}
vol_xml = VolXML.new_from_vol_dumpxml(vol_name, pool_name,
virsh_instance)
volume_xml['key'] = vol_xml.key
volume_xml['path'] = vol_xml.path
volume_xml['capacity'] = vol_xml.capacity
volume_xml['allocation'] = vol_xml.allocation
try:
volume_xml['format'] = vol_xml.format
except LibvirtXMLNotFoundError:
volume_xml['format'] = None
return volume_xml
@staticmethod
def new_vol(**dargs):
"""
Return a new VolXML instance and set properties from dargs
:param dargs: param dictionary
:return: new VolXML instance
"""
new_one = VolXML(virsh_instance=base.virsh)
for key, value in list(dargs.items()):
setattr(new_one, key, value)
return new_one
class Encryption(base.LibvirtXMLBase):
"""
Encryption volume XML class
Properties:
format:
string.
secret:
dict, keys: type, uuid
"""
__slots__ = ('format', 'secret')
def __init__(self, virsh_instance=base.virsh):
accessors.XMLAttribute('format', self, parent_xpath='/',
tag_name='encryption', attribute='format')
accessors.XMLElementDict('secret', self, parent_xpath='/',
tag_name='secret')
super(VolXML.Encryption, self).__init__(
virsh_instance=virsh_instance)
self.xml = '<encryption/>'
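# A hedged usage sketch (illustrative; the volume name, size and pool are made
# up, and a reachable libvirt with a 'default' storage pool is assumed):
#
#     vol = VolXML.new_vol(name='demo.img', capacity=1 << 30)
#     vol.create('default')                                   # virsh vol-create
#     VolXML.get_vol_details_by_name('demo.img', 'default')   # inspect result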
| clebergnu/avocado-vt | virttest/libvirt_xml/vol_xml.py | Python | gpl-2.0 | 7,156 |
# -*- coding: utf-8 -*-
"""Helper methods for libtmux and downstream libtmux libraries."""
from __future__ import absolute_import, unicode_literals, with_statement
import contextlib
import logging
import os
import tempfile
import time
logger = logging.getLogger(__name__)
TEST_SESSION_PREFIX = 'libtmux_'
RETRY_TIMEOUT_SECONDS = int(os.getenv('RETRY_TIMEOUT_SECONDS', 8))
namer = tempfile._RandomNameSequence()
current_dir = os.path.abspath(os.path.dirname(__file__))
example_dir = os.path.abspath(os.path.join(current_dir, '..', 'examples'))
fixtures_dir = os.path.realpath(os.path.join(current_dir, 'fixtures'))
def retry(seconds=RETRY_TIMEOUT_SECONDS):
"""
Retry a block of code until a time limit or ``break``.
Parameters
----------
seconds : int
Seconds to retry, defaults to ``RETRY_TIMEOUT_SECONDS``, which is
configurable via environmental variables.
Returns
-------
bool
True if the time elapsed since :func:`retry` was invoked is less than the *seconds* parameter.
Examples
--------
>>> while retry():
... p = w.attached_pane
... p.server._update_panes()
... if p.current_path == pane_path:
... break
"""
return (lambda: time.time() < time.time() + seconds)()
def get_test_session_name(server, prefix=TEST_SESSION_PREFIX):
"""
Faker to create a session name that doesn't exist.
Parameters
----------
server : :class:`libtmux.Server`
libtmux server
prefix : str
prefix for sessions (e.g. ``libtmux_``). Defaults to
``TEST_SESSION_PREFIX``.
Returns
-------
str
Random session name guaranteed to not collide with current ones.
"""
while True:
session_name = prefix + next(namer)
if not server.has_session(session_name):
break
return session_name
def get_test_window_name(session, prefix=TEST_SESSION_PREFIX):
"""
Faker to create a window name that doesn't exist.
Parameters
----------
session : :class:`libtmux.Session`
libtmux session
prefix : str
prefix for windows (e.g. ``libtmux_``). Defaults to
``TEST_SESSION_PREFIX``.
ATM we reuse the test session prefix here.
Returns
-------
str
Random window name guaranteed to not collide with current ones.
"""
while True:
window_name = prefix + next(namer)
if not session.find_where(window_name=window_name):
break
return window_name
@contextlib.contextmanager
def temp_session(server, *args, **kwargs):
"""
Return a context manager with a temporary session.
If no ``session_name`` is entered, :func:`get_test_session_name` will make
an unused session name.
The session will destroy itself upon closing with :meth:`Session.kill_session()`.
Parameters
----------
server : :class:`libtmux.Server`
Other Parameters
----------------
args : list
Arguments passed into :meth:`Server.new_session`
kwargs : dict
Keyword arguments passed into :meth:`Server.new_session`
Yields
------
:class:`libtmux.Session`
Temporary session
Examples
--------
>>> with temp_session(server) as session:
... session.new_window(window_name='my window')
"""
if 'session_name' in kwargs:
session_name = kwargs.pop('session_name')
else:
session_name = get_test_session_name(server)
session = server.new_session(session_name, *args, **kwargs)
try:
yield session
finally:
if server.has_session(session_name):
session.kill_session()
return
@contextlib.contextmanager
def temp_window(session, *args, **kwargs):
"""
Return a context manager with a temporary window.
The window will destroy itself upon closing with :meth:`window.kill_window()`.
If no ``window_name`` is entered, :func:`get_test_window_name` will make
an unused window name.
Parameters
----------
session : :class:`libtmux.Session`
Other Parameters
----------------
args : list
Arguments passed into :meth:`Session.new_window`
kwargs : dict
Keyword arguments passed into :meth:`Session.new_window`
Yields
------
:class:`libtmux.Window`
temporary window
Examples
--------
>>> with temp_window(session) as window:
... my_pane = window.split_window()
"""
if 'window_name' not in kwargs:
window_name = get_test_window_name(session)
else:
window_name = kwargs.pop('window_name')
window = session.new_window(window_name, *args, **kwargs)
# Get ``window_id`` before returning it, it may be killed within context.
window_id = window.get('window_id')
try:
yield session
finally:
if session.findWhere(window_id=window_id):
window.kill_window()
return
class EnvironmentVarGuard(object):
"""Mock environment variables safely.
Helps protect environment variables properly. Can be used as a context
manager.
Notes
-----
Vendorized to fix issue with Anaconda Python 2 not including test module,
see #121 [1]_
References
----------
.. [1] Just installed, "ImportError: cannot import name test_support".
GitHub issue for tmuxp. https://github.com/tmux-python/tmuxp/issues/121.
Created October 12th, 2015. Accessed April 7th, 2018.
"""
def __init__(self):
self._environ = os.environ
self._unset = set()
self._reset = dict()
def set(self, envvar, value):
if envvar not in self._environ:
self._unset.add(envvar)
else:
self._reset[envvar] = self._environ[envvar]
self._environ[envvar] = value
def unset(self, envvar):
if envvar in self._environ:
self._reset[envvar] = self._environ[envvar]
del self._environ[envvar]
def __enter__(self):
return self
def __exit__(self, *ignore_exc):
for envvar, value in self._reset.items():
self._environ[envvar] = value
for unset in self._unset:
del self._environ[unset]
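# A hedged usage sketch (illustrative variable name):
#
#     with EnvironmentVarGuard() as env:
#         env.set('LIBTMUX_TEST_FLAG', '1')
#         ...   # code under test sees the variable
#     # leaving the block restores os.environ to its previous state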
| tony/libtmux | libtmux/test.py | Python | bsd-3-clause | 6,243 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryApplicationName": _SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryApplicationName": _SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryApplicationName": _SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
"galleryApplicationName": _SERIALIZER.url("gallery_application_name", gallery_application_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_gallery_request(
subscription_id: str,
resource_group_name: str,
gallery_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"galleryName": _SERIALIZER.url("gallery_name", gallery_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class GalleryApplicationsOperations(object):
"""GalleryApplicationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplication",
**kwargs: Any
) -> "_models.GalleryApplication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_application, 'GalleryApplication')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplication",
**kwargs: Any
) -> LROPoller["_models.GalleryApplication"]:
"""Create or update a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition is to be created.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be created
or updated. The allowed characters are alphabets and numbers with dots, dashes, and periods
allowed in the middle. The maximum length is 80 characters.
:type gallery_application_name: str
:param gallery_application: Parameters supplied to the create or update gallery Application
operation.
:type gallery_application: ~azure.mgmt.compute.v2021_07_01.models.GalleryApplication
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either GalleryApplication or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_07_01.models.GalleryApplication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application=gallery_application,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
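# A hedged usage sketch (illustrative; the resource names are made up and a
# configured ComputeManagementClient from azure-mgmt-compute is assumed):
#
#     poller = compute_client.gallery_applications.begin_create_or_update(
#         resource_group_name='my-rg',
#         gallery_name='myGallery',
#         gallery_application_name='myApp',
#         gallery_application=GalleryApplication(
#             location='eastus', supported_os_type='Linux'),
#     )
#     gallery_app = poller.result()   # blocks until the LRO completes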
def _update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplicationUpdate",
**kwargs: Any
) -> "_models.GalleryApplication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_application, 'GalleryApplicationUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
gallery_application: "_models.GalleryApplicationUpdate",
**kwargs: Any
) -> LROPoller["_models.GalleryApplication"]:
"""Update a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition is to be updated.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be updated.
The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the
middle. The maximum length is 80 characters.
:type gallery_application_name: str
:param gallery_application: Parameters supplied to the update gallery Application operation.
:type gallery_application: ~azure.mgmt.compute.v2021_07_01.models.GalleryApplicationUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either GalleryApplication or the result of
cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_07_01.models.GalleryApplication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
gallery_application=gallery_application,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> "_models.GalleryApplication":
"""Retrieves information about a gallery Application Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery from which the Application
Definitions are to be retrieved.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be
retrieved.
:type gallery_application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryApplication, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_07_01.models.GalleryApplication
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryApplication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_application_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Delete a gallery Application.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery in which the Application
Definition is to be deleted.
:type gallery_name: str
:param gallery_application_name: The name of the gallery Application Definition to be deleted.
:type gallery_application_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_application_name=gallery_application_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}'} # type: ignore
@distributed_trace
def list_by_gallery(
self,
resource_group_name: str,
gallery_name: str,
**kwargs: Any
) -> Iterable["_models.GalleryApplicationList"]:
"""List gallery Application Definitions in a gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Application Gallery from which Application
Definitions are to be listed.
:type gallery_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryApplicationList or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_07_01.models.GalleryApplicationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryApplicationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=self.list_by_gallery.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("GalleryApplicationList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_gallery.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications'} # type: ignore
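# ----------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the generated client). The
# credential setup, subscription id and resource names below are placeholders;
# it assumes a ComputeManagementClient built elsewhere with valid credentials.
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.compute import ComputeManagementClient
#
#   client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.gallery_applications.begin_create_or_update(
#       resource_group_name="my-rg",
#       gallery_name="myGallery",
#       gallery_application_name="myApp",
#       gallery_application={"location": "westus"},  # schematic body; real calls need the full model
#   )
#   gallery_app = poller.result()  # blocks until the long-running operation finishes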
| Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/operations/_gallery_applications_operations.py | Python | mit | 33,162 |
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest, functional
import uuid
import time
class ElasticFileSystem(BaseTest):
@functional
def test_resource_manager(self):
factory = self.replay_flight_data('test_efs_query')
client = factory().client('efs')
token = str(uuid.uuid4())
fs_id = client.create_file_system(
CreationToken=token).get('FileSystemId')
tags = [{'Key': 'Name', 'Value': 'Somewhere'}]
client.create_tags(FileSystemId=fs_id, Tags=tags)
if self.recording:
time.sleep(5)
self.addCleanup(client.delete_file_system, FileSystemId=fs_id)
p = self.load_policy({
'name': 'efs-query',
'resource': 'efs',
'filters': [{'tag:Name': 'Somewhere'}],
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Tags'], tags)
def test_mount_target_loading(self):
factory = self.replay_flight_data('test_efs_subresource')
p = self.load_policy({
'name': 'test-mount-targets',
'resource': 'efs-mount-target',
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 2)
def test_delete(self):
factory = self.replay_flight_data('test_efs_delete')
p = self.load_policy({
'name': 'efs-query',
'resource': 'efs',
'filters': [{'Name': 'MyDocs'}],
'actions': ['delete']
}, session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Name'], 'MyDocs')
client = factory().client('efs')
state = client.describe_file_systems().get('FileSystems', [])
self.assertEqual(state, [])
| jimmyraywv/cloud-custodian | tests/test_efs.py | Python | apache-2.0 | 2,532 |
#!/usr/bin/env python3
import pprint
import argparse
import inspect
import json
import os
import requests
import argh
import subprocess
import urllib.parse
API_ENDPOINT = 'https://api.digitalocean.com/v2'
DEBUG = False
DO_API_TOKEN = os.environ.get('DO_API_TOKEN')
DO_KEYPAIR_ID = os.environ.get('DO_KEYPAIR_ID')
DO_KEY = os.environ.get('DO_KEY')
REGS = ['ams', 'fra', 'lon', 'nyc', 'sfo', 'sgp', 'tor']
class C:
blue = '\033[94m'
green = '\033[92m'
red = '\033[91m'
end = '\033[0m'
def R(msg):
return C.red + msg + C.end
def G(msg):
return C.green + msg + C.end
def B(msg):
return C.blue + msg + C.end
def mergedicts(dict1, dict2):
for k in dict2.keys():
if k in dict1:
if type(dict2[k]) is list:
# dict1[k] is most likely list
dict1[k].extend(dict2[k])
else:
                l = [dict1[k], dict2[k]]
dict1[k] = l
else:
dict1[k] = dict2[k]
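# Illustrative sketch (made-up values) of how mergedicts() is used further below
# to accumulate paginated API responses into one dict:
#   acc = {'droplets': [{'id': 1}]}
#   mergedicts(acc, {'droplets': [{'id': 2}]})
#   # acc is now {'droplets': [{'id': 1}, {'id': 2}]}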
class DoError(RuntimeError):
pass
def callCheck(command, env=None, stdin=None):
print("about to run\n%s" % command)
if subprocess.call(command.split(), env=env, stdin=stdin):
raise Exception("%s failed." % command)
def print_debug():
    """ this will print HTTP requests sent """
    import http.client
# http://stackoverflow.com/questions/20658572/
# python-requests-print-entire-http-request-raw
global DEBUG
DEBUG = True
old_send = http.client.HTTPConnection.send
def new_send( self, data ):
print("REQUEST:")
print(urllib.parse.unquote(data.decode()))
return old_send(self, data)
http.client.HTTPConnection.send = new_send
class MyAction(argparse.Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(MyAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
# this gets called when -d/--debug is passed
def __call__(self, parser, namespace, values, option_string=None):
print_debug()
pass
def get_id_by_attr(res_pattern, res_list, attr='name'):
result_list = [i['id'] for i in res_list if i[attr] == res_pattern]
if len(result_list) > 1:
raise DoError("name %s is valid for more ids: %s " %
(res_pattern, result_list))
if len(result_list) == 0:
raise DoError("no resources found for %s, whole list: %s " %
(res_pattern, res_list))
return result_list[0]
def get_regions_string(s):
ret = []
for r in REGS:
t = [ a[-1] for a in s if a.startswith(r) ]
ret.append(r+(",".join(t)))
return " ".join(ret)
class DoManager(object):
def __init__(self, api_key):
self.api_endpoint = API_ENDPOINT
self.api_key = api_key
def all_active_droplets(self, fetch_all=True):
json_out = self.request(path='/droplets/', fetch_all=fetch_all)
return json_out['droplets']
def get_key_id(self, key_name):
return get_id_by_attr(key_name, self.all_ssh_keys())
def get_droplet_id_or_name(self, id_or_name):
if not id_or_name.isdigit():
tmp = get_id_by_attr(id_or_name, self.all_active_droplets())
id = tmp
else:
id = id_or_name
return id
@argh.aliases('nf','newfloatingip')
def new_floating_ip(self, droplet_id):
params = {'droplet_id': droplet_id}
json_out = self.request('/floating_ips', params=params, method='POST')
return json_out['floating_ip']
@argh.aliases('c','create')
def create_droplet(self, name, ssh_keys=DO_KEYPAIR_ID,
image='coreos-stable', region='ams2', size='512mb',
private_networking=False, backups_enabled=False,
user_data=None, ipv6=None):
"Creates droplet. see help for defualts"
ud = None
if user_data:
with open(user_data, 'r') as f:
ud = f.read()
keys = ssh_keys.split(",")
params = {
'name': name,
'size': size,
'image': image,
'region': region,
'private_networking': private_networking,
'user_data': ud,
'ipv6': ipv6,
'backups': backups_enabled,
}
params['ssh_keys'] = keys
json_out = self.request('/droplets', params=params, method='POST')
return json_out['droplet']
def show_droplet(self, did):
json_out = self.request('/droplets/%s' % did)
#self.get_droplet_id_or_name(did))
return json_out['droplet']
@argh.aliases('show')
def show_droplet_readable(self, id):
pprint.pprint(self.show_droplet(id))
def droplet_v2_action(self, id, type, params={}):
params['type'] = type
json_out = self.request('/droplets/%s/actions' % id, params=params, method='POST')
return json_out
@argh.aliases('reboot')
def reboot_droplet(self, did):
json_out = self.droplet_v2_action(did, 'reboot')
json_out.pop('status', None)
return json_out
def power_cycle_droplet(self, id):
json_out = self.droplet_v2_action(id, 'power_cycle')
json_out.pop('status', None)
return json_out
def shutdown_droplet(self, id):
json_out = self.droplet_v2_action(id, 'shutdown')
json_out.pop('status', None)
return json_out
def power_off_droplet(self, id):
json_out = self.droplet_v2_action(id, 'power_off')
json_out.pop('status', None)
return json_out
def power_on_droplet(self, id):
json_out = self.droplet_v2_action(id, 'power_on')
json_out.pop('status', None)
return json_out
def password_reset_droplet(self, id):
json_out = self.droplet_v2_action(id, 'password_reset')
json_out.pop('status', None)
return json_out
def resize_droplet(self, id, size_id):
params = {'size': size_id}
json_out = self.droplet_v2_action(id, 'resize', params)
json_out.pop('status', None)
return json_out
def snapshot_droplet(self, id, name):
params = {'name': name}
json_out = self.droplet_v2_action(id, 'snapshot', params)
json_out.pop('status', None)
return json_out
def restore_droplet(self, id, image_id):
params = {'image': image_id}
json_out = self.droplet_v2_action(id, 'restore', params)
json_out.pop('status', None)
return json_out
@argh.aliases('reb','rebuild')
def rebuild_droplet(self, id, image_id):
params = {'image': image_id}
json_out = self.droplet_v2_action(id, 'rebuild', params)
json_out.pop('status', None)
return json_out
def enable_backups_droplet(self, id):
json_out = self.droplet_v2_action(id, 'enable_backups')
json_out.pop('status', None)
return json_out
def disable_backups_droplet(self, id):
json_out = self.droplet_v2_action(id, 'disable_backups')
json_out.pop('status', None)
return json_out
def rename_droplet(self, id, name):
params = {'name': name}
json_out = self.droplet_v2_action(id, 'rename', params)
json_out.pop('status', None)
return json_out
@argh.aliases('d','destroy')
def destroy_droplet(self, id, force=False):
_id = self.get_droplet_id_or_name(id)
answer = "y"
if not force:
answer = input("Do you really want to remove the droplet[y/n]: ")
if answer == "y":
json_out = self.request('/droplets/%s' % _id, method='DELETE')
json_out.pop('status', None)
return json_out
#regions==========================================
def all_regions(self):
json_out = self.request('/regions/')
return json_out['regions']
#images==========================================
def image_v2_action(self, id, type, params={}):
params = {
'type': type
}
json_out = self.request('/images/%s/actions' % id, params=params, method='POST')
return json_out
def show_image(self, image_id):
params= {'image_id': image_id}
json_out = self.request('/images/%s' % image_id)
return json_out['image']
def destroy_image(self, image_id):
        self.request('/images/%s' % image_id, method='DELETE')
return True
def transfer_image(self, image_id, region_id):
params = {'region': region_id}
        json_out = self.image_v2_action(image_id, 'transfer', params)
json_out.pop('status', None)
return json_out
#ssh_keys=========================================
def all_ssh_keys(self):
json_out = self.request('/account/keys')
return json_out['ssh_keys']
def new_ssh_key(self, name, pub_key):
params = {'name': name, 'public_key': pub_key}
json_out = self.request('/account/keys', params, method='POST')
return json_out['ssh_key']
def show_ssh_key(self, key_id):
json_out = self.request('/account/keys/%s/' % key_id)
return json_out['ssh_key']
def edit_ssh_key(self, key_id, name, pub_key):
params = {'name': name} # v2 API doesn't allow to change key body now
json_out = self.request('/account/keys/%s/' % key_id, params, method='PUT')
return json_out['ssh_key']
def destroy_ssh_key(self, key_id):
self.request('/account/keys/%s' % key_id, method='DELETE')
return True
#domains==========================================
def all_domains(self):
json_out = self.request('/domains/')
return json_out['domains']
def new_domain(self, name, ip):
params = {
'name': name,
'ip_address': ip
}
json_out = self.request('/domains', params=params, method='POST')
return json_out['domain']
def show_domain(self, domain_id):
json_out = self.request('/domains/%s/' % domain_id)
return json_out['domain']
def destroy_domain(self, domain_id):
self.request('/domains/%s' % domain_id, method='DELETE')
return True
def all_domain_records(self, domain_id):
json_out = self.request('/domains/%s/records/' % domain_id)
return json_out['domain_records']
def new_domain_record(self, domain_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {'data': data}
params['type'] = record_type
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
if weight: params['weight'] = weight
json_out = self.request('/domains/%s/records/' % domain_id, params, method='POST')
return json_out['record']
def show_domain_record(self, domain_id, record_id):
json_out = self.request('/domains/%s/records/%s' % (domain_id, record_id))
return json_out['domain_record']
def destroy_domain_record(self, domain_id, record_id):
self.request('/domains/%s/records/%s' % (domain_id, record_id), method='DELETE')
return True
@argh.aliases('f','floatips')
def list_floatips(self):
json_out = self.request('/floating_ips')
for fip in json_out['floating_ips']:
form = "%s in %s on %s"
dn = "None"
if fip['droplet']:
dn = fip['droplet']['name']
fields = (B(fip['ip']), G(fip['region']['slug']), R(dn))
print(form % fields)
#events(actions in v2 API)========================
@argh.aliases('a','actions')
def show_actions(self, type="all"):
actions = self.show_all_actions()
for a in actions:
if type == "all" or a['type'] == type:
form = "%s on %s, id %s, finished on %s"
sanit = lambda s: str(s) if s else ""
ttype = sanit(a['type'])
rtype = sanit(a['resource_type'])
rid = sanit(a['resource_id'])
compl = sanit(a['completed_at'])
#print(ttype,rtype,rid,compl)
fields = (B(ttype), G(rtype), R(rid), B(compl))
print(form % fields)
def show_all_actions(self):
json_out = self.request('/actions')
return json_out['actions']
def show_action(self, action_id):
json_out = self.request('/actions/%s' % action_id)
return json_out['action']
def show_event(self, event_id):
return self.show_action(event_id)
#low_level========================================
def request(self, path, params={}, method='GET', fetch_all=False):
if not path.startswith('/'):
path = '/'+path
url = self.api_endpoint+path
headers = { 'Authorization': "Bearer %s" % self.api_key }
headers['Content-Type'] = 'application/json'
resp = {}
while True:
tmp = self.request_v2(url, params=params, headers=headers,
method=method)
has_next = False
if 'links' in tmp:
has_next = 'pages' in tmp['links']
if has_next:
has_next = 'next' in tmp['links']['pages']
if fetch_all and has_next:
u = urllib.parse.urlparse(tmp['links']['pages']['next'])
next_page = urllib.parse.parse_qs(u.query)['page'][0]
params['page'] = next_page
del(tmp['links'])
del(tmp['meta'])
#resp.update(tmp)
mergedicts(resp, tmp)
else:
mergedicts(resp, tmp)
break
return resp
def request_v2(self, url, headers={}, params={}, method='GET'):
try:
if method == 'POST':
resp = requests.post(url, data=json.dumps(params), headers=headers, timeout=60)
json_out = resp.json()
elif method == 'DELETE':
resp = requests.delete(url, headers=headers, timeout=60)
json_out = {'status': resp.status_code}
elif method == 'PUT':
resp = requests.put(url, headers=headers, params=params, timeout=60)
json_out = resp.json()
elif method == 'GET':
resp = requests.get(url, headers=headers, params=params, timeout=60)
json_out = resp.json()
else:
raise DoError('Unsupported method %s' % method)
except ValueError: # requests.models.json.JSONDecodeError
raise ValueError("The API server doesn't respond with a valid json_out")
except requests.RequestException as e: # errors from requests
raise RuntimeError(e)
if DEBUG:
print("RESPONSE:")
print(resp.status_code)
print(json.dumps(json_out, sort_keys=True, indent=4))
if resp.status_code != requests.codes.ok: #pylint: disable=no-member
if json_out:
if 'error_message' in json_out:
raise DoError(json_out['error_message'])
elif 'message' in json_out:
raise DoError(json_out['message'])
            # The JSON response is bad, so raise an exception with the HTTP status
resp.raise_for_status()
if json_out.get('id') == 'not_found':
raise DoError(json_out['message'])
return json_out
@argh.aliases('s')
def ssh(self, fuzzy_name, user='core', key=DO_KEY, port='22'):
chosen = [d for d in self.all_active_droplets()
if fuzzy_name == d['name']]
        if len(chosen) > 1:
raise DoError("name too ambiguous")
if len(chosen) == 0 :
raise DoError("no droplet by that name")
ip = self.get_public_ip(chosen[0])
cmd = "ssh -o IdentitiesOnly=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s -p %s %s@%s" % (DO_KEY, port, user, ip)
callCheck(cmd)
def get_private_ip(self, d):
for n in d['networks']['v4']:
if n['type'] == 'private':
return n['ip_address']
def get_public_ip(self, d):
for n in d['networks']['v4']:
if n['type'] == 'public':
return n['ip_address']
def status(self, s):
if s == "new":
return G(s)
if s == "active":
return B(s)
if s in ["off","archive"]:
return R(s)
def droplets(self, fetch_all=False):
for d in self.all_active_droplets(fetch_all):
form = "%s %s[%s] %s - %s, %s"
fields = (G(str(d['id'])), B(d['name']), G(d['region']['slug']),
self.status(d['status']), self.get_public_ip(d),
self.get_private_ip(d))
print(form % fields)
def avail(self, s):
if s:
return G("available")
else:
return R("not avail")
@argh.aliases('s')
def sizes(self):
json_out = self.request('/sizes/')
for s in json_out['sizes']:
form = "%s: %s vcpus, at %s"
print(form % (R(s['slug']), G(str(s['vcpus'])), B(get_regions_string(s['regions']))))
@argh.aliases('r')
def regions(self):
for r in self.all_regions():
form = "%s: %s, features: %s"
fields = (B(r['slug']),
self.avail(r['available']), ",".join(r['features']))
print(form % fields)
@argh.aliases('i')
@argh.arg('--type', '-t', choices=['application', 'distribution'])
@argh.arg('--private', '-p', default=False)
def images(self, type='', private=False, fetch_all=False):
params = {}
if type:
params = {'type': type}
if private:
params = {'private': 'true'}
for i in self.request('/images/', params=params, fetch_all=fetch_all)['images']:
form = "%s %s at %s"
name = i['slug']
if not name:
name = i['name']
print(form % (R(name), G(str(i['id'])), B(",".join( i['regions'] ) )))
@argh.aliases('k')
def keypairs(self):
for k in self.all_ssh_keys():
form = "%s: id %s, \'%s\'"
fields = (R(k['name']), B(str(k['id'])), k['public_key'])
print(form % fields)
class Proxy(object):
_manager = None
def __new__(cls, *args, **kwargs):
if not cls._manager:
api_token = DO_API_TOKEN
cls._manager = DoManager(api_token)
return cls._manager
#def init():
# """ checks if credentials are present and initalizes the module """
# manager = Proxy()
#
# current_module = __import__(__name__)
#
# for name, method in inspect.getmembers(manager, inspect.ismethod):
# if name != "__init__":
# setattr(current_module, name, method)
if __name__ == "__main__":
do = DoManager(DO_API_TOKEN)
parser = argh.ArghParser()
exposed = [do.create_droplet, do.ssh, do.droplets, do.regions, do.keypairs,
do.destroy_droplet, do.show_droplet_readable, do.images,
do.show_actions, do.rebuild_droplet, do.list_floatips,
do.sizes,
do.new_floating_ip, do.reboot_droplet]
argh.assembling.set_default_command(parser, do.droplets)
parser.add_commands(exposed)
parser.add_argument('-d', '--debug', const=False, action=MyAction)
parser.dispatch()
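# Programmatic usage sketch (illustrative; requires DO_API_TOKEN to be set and
# lists whatever droplets exist in your account):
#   do = DoManager(DO_API_TOKEN)
#   for d in do.all_active_droplets():
#       print(d['name'], do.get_public_ip(d))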
| t0mk/dosl | dosl.py | Python | mit | 19,968 |
from xml.dom import minidom
from geopy.distance import distance
import os, datetime, json
os.chdir('data')
runs = []
print 'progreso,ritmo,tipo'
for filename in os.listdir(os.getcwd()):
e = minidom.parse(filename)
run = {}
total_time = 0
total_distance = 0
raw_points = []
enriched_points = []
for trkpt in e.getElementsByTagName('trkpt'):
point = {}
lon = float(trkpt.attributes['lon'].value)
point['lon'] = float(trkpt.attributes['lon'].value)
point['lat'] = float(trkpt.attributes['lat'].value)
time = trkpt.getElementsByTagName('time')[0].firstChild.wholeText
point['time'] = datetime.datetime.strptime(time, "%Y-%m-%dT%H:%M:%SZ")
raw_points.append(point)
for i in range(0, len(raw_points) - 1):
enriched_point = {}
current_point = raw_points[i]
next_point = raw_points[i + 1]
time_delta = (next_point['time'] - current_point['time']).total_seconds()
total_time += time_delta
mean_time = current_point['time'] + datetime.timedelta(0, time_delta/2)
enriched_point['mean_time'] = mean_time
current_position = (current_point['lat'], current_point['lon'])
next_position = (next_point['lat'], next_point['lon'])
dist = distance(current_position, next_position).meters
total_distance += dist
enriched_point['distance'] = dist
enriched_point['acum_distance'] = total_distance
enriched_point['time_delta'] = time_delta
if time_delta != 0:
enriched_point['speed'] = (dist/1000)/(time_delta/3600)
else:
enriched_point['speed'] = 0
if (dist != 0):
enriched_point['pace'] = (time_delta/60)/(dist/1000)
else:
enriched_point['pace'] = 0
enriched_points.append(enriched_point)
#for point in raw_points:
# point['time'] = point['time'].isoformat()
#for point in enriched_points:
# point['mean_time'] = point['mean_time'].isoformat()
# run['raw_points'] = raw_points
run['enriched_points'] = enriched_points
run['total_distance'] = total_distance
run['total_distance_km'] = total_distance/1000.0
run['total_time'] = total_time
run['total_time_min'] = total_time/60.0
run['avg_pace_min_km'] = (total_time/60)/(total_distance/1000)
run['avg_speed_km/h'] = (total_distance/1000)/(total_time/3600)
# print (total_distance/1000.0).__str__() + ',' + ((total_time/60)/(total_distance/1000)).__str__() + ',' + raw_points[0]['time'].year.__str__()
runs.append(run)
normalized_pace_profiles = [{}, {}, {}]
for run in runs:
total = run['total_distance']
for point in run['enriched_points']:
if total < 3000:
cat = 1
elif total < 6000:
cat = 2
else:
cat = 3
normalized_pace_profiles[cat - 1][int(point['acum_distance'] * 100 / total).__str__()] = point['pace']
i = 1
for profile in normalized_pace_profiles:
for key in profile:
pass
# print key, ',', profile[key], ',', i
i += 1
print(json.dumps(normalized_pace_profiles))
| santi698/infovis | runkeeper-stats/parse_data.py | Python | gpl-3.0 | 2,938 |
import json
from flask import request
from redash import models
from redash.handlers.base import BaseResource
from redash.permissions import (require_access,
require_object_modify_permission,
require_permission, view_only)
class WidgetListResource(BaseResource):
@require_permission('edit_dashboard')
def post(self):
widget_properties = request.get_json(force=True)
dashboard = models.Dashboard.get_by_id_and_org(widget_properties.pop('dashboard_id'), self.current_org)
require_object_modify_permission(dashboard, self.current_user)
widget_properties['options'] = json.dumps(widget_properties['options'])
widget_properties.pop('id', None)
widget_properties['dashboard'] = dashboard
visualization_id = widget_properties.pop('visualization_id')
if visualization_id:
visualization = models.Visualization.get_by_id_and_org(visualization_id, self.current_org)
require_access(visualization.query_rel.groups, self.current_user, view_only)
else:
visualization = None
widget_properties['visualization'] = visualization
widget = models.Widget(**widget_properties)
models.db.session.add(widget)
models.db.session.commit()
layout = json.loads(widget.dashboard.layout)
new_row = True
if len(layout) == 0 or widget.width == 2:
layout.append([widget.id])
elif len(layout[-1]) == 1:
neighbour_widget = models.Widget.query.get(layout[-1][0])
if neighbour_widget.width == 1:
layout[-1].append(widget.id)
new_row = False
else:
layout.append([widget.id])
else:
layout.append([widget.id])
widget.dashboard.layout = json.dumps(layout)
models.db.session.add(widget.dashboard)
models.db.session.commit()
return {'widget': widget.to_dict(), 'layout': layout, 'new_row': new_row, 'version': dashboard.version}
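# Layout sketch (illustrative, widget ids are made up): the dashboard layout is a
# JSON-encoded list of rows, each row being a list of widget ids. For example
#   json.loads('[[1, 2], [3]]')
# means widgets 1 and 2 share the first row and widget 3 gets its own row, which is
# the structure the branching in WidgetListResource.post maintains.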
class WidgetResource(BaseResource):
@require_permission('edit_dashboard')
def post(self, widget_id):
# This method currently handles Text Box widgets only.
widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
require_object_modify_permission(widget.dashboard, self.current_user)
widget_properties = request.get_json(force=True)
widget.text = widget_properties['text']
models.db.session.commit()
return widget.to_dict()
@require_permission('edit_dashboard')
def delete(self, widget_id):
widget = models.Widget.get_by_id_and_org(widget_id, self.current_org)
require_object_modify_permission(widget.dashboard, self.current_user)
widget.delete()
models.db.session.commit()
return {'layout': widget.dashboard.layout, 'version': widget.dashboard.version}
| stefanseifert/redash | redash/handlers/widgets.py | Python | bsd-2-clause | 2,967 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import cookielib
import sys
import urllib
from BeautifulSoup import BeautifulSoup
import mechanize
class WebPttBot(object):
def __init__(self,board):
self._board = board
# Browser
self._br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
self._br.set_cookiejar(cj)
# Browser options
self._br.set_handle_equiv(True)
self._br.set_handle_redirect(True)
self._br.set_handle_referer(True)
self._br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
self._br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# User-Agent (this is cheating, ok?)
self._br.addheaders = [('User-agent', "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1)"
" Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1")]
def __checkPage(self):
response = self._br.open(self._url)
currentUrl = response.geturl()
print currentUrl
if "over18" in currentUrl:
return self.___confirmOver18()
else:
return True
def ___confirmOver18(self):
self._br.select_form(nr=0)
response = self._br.submit()
url = response.geturl()
if self._board+'/index.html' in url:
return True
else:
return False
def getBrowser(self):
return self._br
def getHtmlContent(self, url):
self._url = url
if self.__checkPage() is True:
response = self._br.open(self._url)
return response.read()
else:
raise Exception("Can not open target page")
| eternnoir/PyrserPTT | PyrserPTT/PttHtmlGraber.py | Python | gpl-2.0 | 1,773 |
#!/usr/bin/python
#
#
#
import sys
import os
import datetime
import commands
import re
import time
import simplejson as json
from optparse import OptionParser
def run(inJsonFilename):
inJson = open(inJsonFilename).read()
data = json.loads(inJson)
# Add a name for the output file that will be generated
for item in data:
newAudioFile = item['audioFile'].replace("/","_")
item['inputFile'] = "/data/django/orchive/audio/%s.wav" % (item['audioFile'])
item['outputFile'] = "/tmp/%s-%s-%s.wav" % (newAudioFile, item['startSec'], item['endSec'])
# Run sox on each input file
for item in data:
startSec = float(item['startSec'])
endSec = float(item['endSec'])
lengthSec = endSec - startSec
command = "sox %s %s trim %f %f" % (item['inputFile'], item['outputFile'], startSec, lengthSec)
a = commands.getoutput(command)
# Make .mf file
ts = time.time()
mfFilename = "/tmp/bextract-%i.mf" % ts
mfFile = open(mfFilename, "w")
for item in data:
mfFile.write("%s\t%s\n" % (item['outputFile'], item['label']))
mfFile.close()
# Run bextract on audio file
mplFilename = "/tmp/bextract-%i.mpl" % ts
command = "bextract %s -pm -p %s" % (mfFilename, mplFilename)
a = commands.getoutput(command)
# Return .mpl file as text
mplFile = open(mplFilename, "r")
mplData = mplFile.read()
# Remove temporary audio files when done
for item in data:
os.remove(item['outputFile'])
os.remove(mfFilename)
os.remove(mplFilename)
print mplData
if __name__ == "__main__":
usage = "usage: %prog [options] in.json"
parser = OptionParser(usage)
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
run(args[0])
| eriser/marsyas | src/apps/openmir/omTrainClassifier.py | Python | gpl-2.0 | 1,847 |
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
from django.conf import settings
from contact.forms import ContactForm
def contact(request):
if request.method == 'POST': # If the form has been submitted...
form = ContactForm(request.POST or None) # A form bound to the POST data
if form.is_valid():
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
sender = form.cleaned_data['sender']
cc_myself = form.cleaned_data['cc_myself']
recipients = ['tarselmj@clarkson.edu']
#recipients = settings.RECIPIENTS
if cc_myself:
recipients.append(sender)
send_mail(subject, message, sender, recipients)
            # All validation rules passed and the cleaned data is in form.cleaned_data,
            # so show the thank-you page.
            return render(request, 'thankyou.html')
            # Alternative: redirect after POST instead of rendering directly:
            # return HttpResponseRedirect('/thankyou/')
else:
form = ContactForm() # An unbound form
return render(request, 'contact.html', {'form': form})
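# Wiring sketch (assumption: a project-level urls.py; the route and name are made
# up, and older Django versions would use url() instead of path()):
#   from django.urls import path
#   from contact import views
#   urlpatterns = [path('contact/', views.contact, name='contact')]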
| mtarsel/djangoclass | simplewebsiteEXAMPLE/contact/views.py | Python | gpl-2.0 | 1,200 |
import datetime
from satoshidicetools.api import satoshidice
class DiceRoller:
"""
DiceRoller bot skeleton.
"""
def __init__(self, secret):
self.api = satoshidice.Api(secret=secret)
def bet(self, bet_in_satoshis, below_roll_to_win, client_roll):
result = self.api.place_bet(bet_in_satoshis, below_roll_to_win, client_roll)
self.output_bet_info(result)
def output_bet_info(self, result):
print "{date}:".format(date=datetime.datetime.now())
print "bet={bet:.8f} payout={payout:.8f} profit={profit:.8f} balance={balance:.8f} outcome={outcome} probability={probability}% target={target} roll={roll}".format(
bet=result["bet"]["betInSatoshis"],
payout=result["bet"]["payoutInSatoshis"],
profit=result["bet"]["profitInSatoshis"],
balance=result["userBalanceInSatoshis"],
probability=result["bet"]["probability"],
target=result["bet"]["target"],
roll=result["bet"]["roll"],
outcome=result["bet"]["result"])
print "\n"
| tranqil/satoshi-dice-tools | satoshidicetools/api/examples/diceroller.py | Python | mit | 1,052 |
import numpy as np
def split(frac_training,frac_valid, datafile, file_train, file_valid, file_test):
f_train = open(file_train, "w")
f_valid = open(file_valid, "w")
f_test = open(file_test, "w")
ntrain = 0
nvalid = 0
ntest = 0
with open(datafile, "r") as f:
for line in f:
setID = np.argmax(np.random.multinomial(1, [frac_training, frac_valid, 1-frac_training-frac_valid], size = 1))
if setID == 0:
f_train.write(line)
ntrain += 1
elif setID == 1:
f_valid.write(line)
nvalid += 1
elif setID == 2:
f_test.write(line)
ntest += 1
else:
print "error"
print ntrain
print nvalid
print ntest
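# How the three-way assignment works (sketch, example probabilities): each line is
# placed independently with probabilities [frac_training, frac_valid, remainder],
#   np.argmax(np.random.multinomial(1, [0.7, 0.1, 0.2]))  # -> 0 (train), 1 (valid) or 2 (test)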
if __name__ == "__main__":
frac_training = 0.7
frac_valid = 0.1
datafile = "data/reaction_NYTWaPoWSJ_K10"
split(frac_training = frac_training, frac_valid = frac_valid,
datafile = datafile,
file_train = datafile + "_" + str(frac_training)+"train",
file_valid = datafile + "_" + str(frac_valid)+"valid",
file_test = datafile + "_" + str(1- frac_training - frac_valid)+"test")
| JasonLC506/CollaborativeFiltering | traintestSplit.py | Python | mit | 1,241 |
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -mtailchaser` python will execute
``__main__.py`` as a script. That means there won't be any
``tailchaser.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``tailchaser.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import logging
import sys
from .tailer import Tailer
log = logging.getLogger(__name__)
def main(argv=sys.argv, consumer=None):
"""
Args:
argv (list): List of arguments
consumer: an optional consumer
Returns:
int: A return code
Does stuff.
"""
Tailer.cli(argv, consumer=consumer)
return 0
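# Packaging sketch (assumption about how setup.py could expose this entry point,
# following the setuptools link in the module docstring; the script name is made up):
#   entry_points={'console_scripts': ['tailchase = tailchaser.cli:main']}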
| thanos/tailchaser | src/tailchaser/cli.py | Python | bsd-2-clause | 976 |
"""
Streams represent basic IO objects, used by devices for reading or writing
(streams) of data.
``Stream`` object encapsulates an actual IO object - ``file``-like stream,
raw file descriptor, or even something completely different. ``Stream`` classes
then provide basic IO methods for moving data to and from stream, shielding
user from implementation details, like Python2/Python3 differencies.
"""
import abc
import errno
import fcntl
import io
import os
import termios
import tty
from six import PY2, integer_types, string_types, add_metaclass
from .errors import InvalidResourceError
from .util import isfile
def fd_blocking(fd, block = None):
"""
Query or set blocking mode of file descriptor.
:type int fd: file descriptor to manipulate.
:type bool block: if set, method will set blocking mode of file descriptor
accordingly: ``True`` means blocking, ``False`` non-blocking mode. If not
set, the current setting will be returned. ``None`` by default.
:rtype: bool
:returns: if ``block`` is ``None``, current setting of blocking mode is
returned - ``True`` for blocking, ``False`` for non-blocking. Othwerwise,
function returns nothing.
"""
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
if block is None:
return (flags & os.O_NONBLOCK) == 0
if block is True:
flags &= ~os.O_NONBLOCK
else:
flags |= os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
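# Example sketch: toggling a pipe's read end between modes (the descriptors are
# created here purely for illustration):
#   r, w = os.pipe()
#   fd_blocking(r, block = False)
#   assert fd_blocking(r) is False
#   fd_blocking(r, block = True)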
@add_metaclass(abc.ABCMeta)
class Stream(object):
"""
Abstract base class of all streams.
:param machine: parent :py:class`ducky.machine.Machine` object.
:param desc: description of stream. This is a short, string representation
of the stream.
:param stream: ``file``-like stream that provides IO method (``read()`` or
``write``). If it is set, it is preferred IO object.
:param int fd: raw file descriptor. ``stream`` takes precedence, otherwise this
file descriptor is used.
:param bool close: if ``True``, and if ``stream`` has a ``close()`` method, stream
will provide ``close()`` method that will close the underlaying ``file``-like
object. ``True`` by default.
:param bool allow_close: if not ``True``, stream's ``close()`` method will *not*
close underlying IO resource. ``True`` by default.
"""
def __init__(self, machine, desc, stream = None, fd = None, close = True, allow_close = True):
if stream is None and fd is None:
raise InvalidResourceError('Stream "%s" must have stream object or raw file descriptor.' % desc)
self.logger = machine.LOGGER
self.DEBUG = machine.LOGGER.debug
self.desc = desc
self.stream = stream
self.fd = fd
self._raw_read = self._raw_read_stream if stream is not None else self._raw_read_fd
self._raw_write = self._raw_write_stream if stream is not None else self._raw_write_fd
self.allow_close = allow_close
self._close = None
if close is True and stream is not None and hasattr(stream, 'close'):
self._close = stream.close
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.desc)
def has_fd(self):
"""
Check if stream has raw file descriptor. File descriptors can be checked
for IO availability by reactor's polling task.
:rtype: bool
:returns: ``True`` when stream has file descriptor.
"""
return self.fd is not None
def has_poll_support(self):
"""
Streams that can polled for data should return ``True``.
:rtype: bool
"""
# For most common case, if the stream has file descriptor set, it can be polled.
return self.has_fd()
def register_with_reactor(self, reactor, **kwargs):
"""
Called by owner to register the stream with reactor's polling service.
See :py:meth:`ducky.reactor.Reactor.add_fd` for keyword arguments.
:param ducky.reactor.Reactor reactor: reactor instance to register with.
"""
reactor.add_fd(self.fd, **kwargs)
def unregister_with_reactor(self, reactor):
"""
Called by owner to unregister the stream with reactor's polling service,
e.g. when stream is about to be closed.
:param ducky.reactor.Reactor reactor: reactor instance to unregister from.
"""
reactor.remove_fd(self.fd)
def _raw_read_stream(self, size = None):
self.DEBUG('%s._raw_read_stream: size=%s', self.__class__.__name__, size)
size = size or 0
return self.stream.read(size)
def _raw_read_fd(self, size = None):
self.DEBUG('%s._raw_read_fd: size=%s', self.__class__.__name__, size)
size = size or io.DEFAULT_BUFFER_SIZE
return os.read(self.fd, size)
def _raw_write_stream(self, data):
self.DEBUG('%s._raw_write_stream: data="%s", len=%s, type=%s', self.__class__.__name__, data, len(data), type(data))
remaining_chars = len(data)
while remaining_chars > 0:
try:
self.stream.write(data)
return
except io.BlockingIOError as e:
remaining_chars -= e.characters_written
continue
except EnvironmentError as e:
# Resource temporarily unavailable
        if e.errno == errno.EAGAIN:
continue
raise e
def _raw_write_fd(self, data):
self.DEBUG('%s._raw_write_fd: data="%s", len=%s, type=%s', self.__class__.__name__, data, len(data), type(data))
os.write(self.fd, data)
@abc.abstractmethod
def read(self, size = None):
"""
Read data from stream.
:param int size: if set, read at maximum ``size`` bytes.
:rtype: ``bytearray`` (Python2), ``bytes`` (Python3)
:returns: read data, of maximum lenght of ``size``, ``None`` when there are
no available data, or empty string in case of EOF.
"""
raise NotImplementedError('%s does not implement read method' % self.__class__.__name__)
@abc.abstractmethod
def write(self, buff):
"""
Write data to stream.
:param bytearray buff: data to write. ``bytearray`` (Python2), ``bytes`` (Python3)
"""
raise NotImplementedError('%s does not implement write method' % self.__class__.__name__)
def close(self):
"""
This method will close the stream. If ``allow_close`` flag is not set
to ``True``, nothing will happen. If the stream wasn't created with ``close``
set to ``True``, nothing will happen. If the wrapped IO resource does not
have ``close()`` method, nothing will happen.
"""
self.DEBUG('%s.close: allow_close=%s, _close=%s', self.__class__.__name__, self.allow_close, self._close)
if not self.allow_close:
self.DEBUG('%s.close: not allowed', self.__class__.__name__)
return
if self._close is not None:
self._close()
class InputStream(Stream):
if PY2:
def read(self, size = None):
self.DEBUG('%s.read: size=%s', self.__class__.__name__, size)
buff = self._raw_read(size = size)
if not buff:
return bytearray([])
return bytearray([ord(c) for c in buff])
else:
def read(self, size = None):
self.DEBUG('%s.read: size=%s', self.__class__.__name__, size)
buff = self._raw_read(size = size)
return buff
def write(self, b):
raise NotImplementedError('%s does not implement write method' % self.__class__.__name__)
@staticmethod
def create(machine, desc):
machine.LOGGER.debug('InputStream.create: desc=%s', desc)
if isfile(desc):
return FileInputStream(machine, desc)
if hasattr(desc, 'read'):
return MethodInputStream(machine, desc)
if hasattr(desc, 'fileno'):
return FDInputStream(machine, desc.fileno())
if isinstance(desc, integer_types):
return FDInputStream(machine, desc)
if desc == '<stdin>':
return StdinStream(machine)
if isinstance(desc, string_types):
return FileInputStream(machine, open(desc, 'rb'))
raise InvalidResourceError('Unknown stream description: desc=%s' % desc)
class FileInputStream(InputStream):
def __init__(self, machine, f, **kwargs):
super(FileInputStream, self).__init__(machine, '<file %s>' % f.name, stream = f, fd = f.fileno())
class MethodInputStream(InputStream):
def __init__(self, machine, desc, **kwargs):
super(MethodInputStream, self).__init__(machine, repr(desc), stream = desc)
class FDInputStream(InputStream):
def __init__(self, machine, fd, **kwargs):
super(FDInputStream, self).__init__(machine, '<fd %s>' % fd, fd = fd)
class StdinStream(InputStream):
def __init__(self, machine, **kwargs):
DEBUG = machine.LOGGER.debug
self.old_termios = None
stream = machine.stdin.buffer if hasattr(machine.stdin, 'buffer') else machine.stdin
fd = machine.stdin.fileno() if hasattr(machine.stdin, 'fileno') else None
DEBUG('%s.__init__: stream=%s, fd=%s', self.__class__.__name__, stream, fd)
if fd is not None:
DEBUG('%s.__init__: re-pack <stdin> fd as a new stream to avoid colisions', self.__class__.__name__)
stream = os.fdopen(fd, 'rb', 0)
fd = stream.fileno()
DEBUG('%s.__init__: set <stdin> fd to non-blocking mode', self.__class__.__name__)
fd_blocking(fd, block = False)
if stream.isatty():
DEBUG('%s.__init__: attempt to set CBREAK and NOECHO mode', self.__class__.__name__)
try:
self.old_termios = termios.tcgetattr(fd)
flags = termios.tcgetattr(fd)
flags[3] = flags[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, flags)
tty.setcbreak(fd)
except termios.error as e:
if e.args[0] != errno.ENOTTY:
raise e
DEBUG('%s.__init__: stream=%r, fd=%r', self.__class__.__name__, stream, fd)
super(StdinStream, self).__init__(machine, '<stdin>', stream = stream, fd = fd, close = False, **kwargs)
def close(self):
self.DEBUG('%s.close', self.__class__.__name__)
if self.old_termios is not None:
termios.tcsetattr(self.fd, termios.TCSANOW, self.old_termios)
def get_selectee(self):
return self.stream
class OutputStream(Stream):
def read(self, size = None):
raise NotImplementedError('%s does not implement read method' % self.__class__.__name__)
if PY2:
def write(self, buff):
self.DEBUG('%s.write: buff=%s', self.__class__.__name__, buff)
self._raw_write(''.join([chr(b) for b in buff]))
else:
def write(self, buff):
self.DEBUG('%s.write: buff=%s', self.__class__.__name__, buff)
self._raw_write(bytes(buff))
def flush(self):
if self.stream is not None and hasattr(self.stream, 'flush'):
self.stream.flush()
@staticmethod
def create(machine, desc):
machine.LOGGER.debug('OutputStream.create: desc=%s', desc)
if isfile(desc):
return FileOutputStream(machine, desc)
if hasattr(desc, 'write'):
return MethodOutputStream(machine, desc)
if hasattr(desc, 'fileno'):
return FDOutputStream(machine, desc.fileno())
if isinstance(desc, integer_types):
return FDOutputStream(machine, desc)
if desc == '<stdout>':
return StdoutStream(machine)
if desc == '<stderr>':
return StderrStream(machine)
if isinstance(desc, string_types):
return FileOutputStream(machine, open(desc, 'wb'))
raise InvalidResourceError('Unknown stream description: desc=%s' % desc)
class FileOutputStream(OutputStream):
def __init__(self, machine, f, **kwargs):
super(FileOutputStream, self).__init__(machine, '<file %s>' % f.name, stream = f, fd = f.fileno())
class FDOutputStream(OutputStream):
def __init__(self, machine, fd, **kwargs):
super(FDOutputStream, self).__init__(machine, '<fd %s>' % fd, fd = fd)
class MethodOutputStream(OutputStream):
def __init__(self, machine, desc, **kwargs):
super(MethodOutputStream, self).__init__(machine, repr(desc), stream = desc)
class StdoutStream(OutputStream):
def __init__(self, machine):
stream = machine.stdout.buffer if hasattr(machine.stdout, 'buffer') else machine.stdout
fd = machine.stdout.fileno() if hasattr(machine.stdout, 'fileno') else None
super(StdoutStream, self).__init__(machine, '<stdout>', stream = stream, fd = fd, close = False)
class StderrStream(OutputStream):
def __init__(self, machine):
stream = machine.stderr.buffer if hasattr(machine.stderr, 'buffer') else machine.stderr
fd = machine.stderr.fileno() if hasattr(machine.stderr, 'fileno') else None
super(StderrStream, self).__init__(machine, '<stderr>', stream = stream, fd = fd, close = False)
| happz/ducky | ducky/streams.py | Python | mit | 12,385 |
"""Configuration of Radar forms."""
import os
import typing
from typing import Mapping, Optional, Sequence, TypedDict
import json5
class Config(TypedDict):
"""Configuration of Radar forms."""
domainIds: Sequence[str]
skillIds: Sequence[str]
translations: Mapping[str, str]
def from_json5_file(json_path: Optional[str] = None) -> Config:
"""Load a config from a json5 file."""
if not json_path:
json_path = os.path.join(os.path.dirname(__file__), 'config.json5')
with open(json_path, 'r') as json_file:
return typing.cast(Config, json5.load(json_file))
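# Usage sketch (reads the default config.json5 shipped next to this module):
#   config = from_json5_file()
#   print(config['domainIds'])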
| bayesimpact/bob-emploi | data_analysis/radar/config.py | Python | gpl-3.0 | 604 |
import cv2
import sys
import os
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
import shutil
'''
To Do:
Allow for user input for folder date!
Copy Image Folder Directory
Search through all image files
Store in array of images to be processed
Vary Thresholds
Edge images are then written to each respective folder
Exit!
'''
def initialize():
plt.switch_backend('agg')
if __name__ == "__main__":
IMAGE_DIRECTORY = '/home/vagrant/images/'
IMAGE_SET_DATE = '02.28.15'
THRESHOLDS = []
a = 60
b = 80
while(a < 100):
b = 80
while(b < 120):
THRESHOLDS.append((a,b))
b += 10
a += 10
#THRESHOLDS = [(50,80),(50,90),(50,100),(50,110),(50,120),(75,80),(100,150),(100,200)]
initialize()
src = IMAGE_DIRECTORY + IMAGE_SET_DATE + '/original/'
dst = IMAGE_DIRECTORY + IMAGE_SET_DATE + '/edges/'
shutil.copytree(src, dst, symlinks=False, ignore=None)
for dirName, subdirList, fileList in os.walk(IMAGE_DIRECTORY + IMAGE_SET_DATE):
print('Found directory: %s' % dirName)
for fname in fileList:
print('\t%s' % fname)
print fname[-3:]
if fname[-3:] == 'jpg':
image = cv2.imread((dirName + '/' + fname),0)
dest = dirName.replace('original', 'edges')
cv2.imwrite(dest + '/' + fname, image)
for parameter in THRESHOLDS:
edge = cv2.Canny(image, parameter[0], parameter[1])
cv2.imwrite(dest + '/' + 'edge(' + str(parameter[0]) + \
',' + str(parameter[1]) + ')' + fname, edge)
print ("Canny Edge Detection completed!")
| patrickstocklin/SeniorThesis | scripts/edgeDetect.py | Python | agpl-3.0 | 1,509 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "barati.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| abhaystoic/barati | barati/manage.py | Python | apache-2.0 | 249 |
#-------------------------------------------------------------------------------------------------------------------#
#
# IB2d is an Immersed Boundary Code (IB) for solving fully coupled non-linear
# fluid-structure interaction models. This version of the code is based off of
# Peskin's Immersed Boundary Method Paper in Acta Numerica, 2002.
#
# Author: Nicholas A. Battista
# Email: nickabattista@gmail.com
# Date Created: May 27th, 2015
# Institution: UNC-CH
#
# This code is capable of creating Lagrangian Structures using:
# 1. Springs
# 2. Beams (*torsional springs)
# 3. Target Points
# 4. Muscle-Model (combined Force-Length-Velocity model, "HIll+(Length-Tension)")
#
# One is able to update those Lagrangian Structure parameters, e.g., spring constants, resting lengths, etc
#
# There are a number of built in Examples, mostly used for teaching purposes.
#
# If you would like us to add a specific muscle model, please let Nick (nickabattista@gmail.com) know.
#
#--------------------------------------------------------------------------------------------------------------------#
import numpy as np
################################################################################################################
#
# def: Computes the components of the force term in Navier-Stokes from
# arbitrary external forces, i.e., external force to get desired
# velocity profile on fluid grid
#
################################################################################################################
def please_Compute_External_Forcing(dt,current_time,x,y, grid_Info, uX, uY, first, inds):
#
# dt: time-step
# current_time: Current time of simulation (in seconds)
# x: x-Eulerian pts
# y: y-Eulerian pts
# grid_Info: holds lots of geometric pieces about grid / simulations
# uX: x-Velocity on Eulerian Grid
# uY: y-Velocity on Eulerian Grid
# Grid Info #
Nx = grid_Info[0] # # of Eulerian pts. in x-direction
Ny = grid_Info[1] # # of Eulerian pts. in y-direction
Lx = grid_Info[2] # Length of Eulerian grid in x-coordinate
Ly = grid_Info[3] # Length of Eulerian grid in y-coordinate
dx = grid_Info[4] # Spatial-size in x
dy = grid_Info[5] # Spatial-size in y
supp = grid_Info[6] # Delta-def support
Nb = grid_Info[7] # # of Lagrangian pts.
ds = grid_Info[8] # Lagrangian spacing
# Compute Where You Want to Apply Force
xMin = 0.2
xMax = 0.3
yMin = 0.48
yMax = 0.52
# Stiffness for Arbitrary External Force to Fluid Grid
kStiff = 1e4
# Width of Channel
w = 0.3
# Max Velocity Desired
uMax = 10.0
if first == 1:
inds = give_Me_Indices_To_Apply_Force(x,y,xMin,xMax,yMin,yMax)
first = 0
# Compute External Forces from Desired Target Velocity
fx, fy = give_Me_Velocity_Target_External_Force_Density(current_time,dx,dy,x,y,Nx,Ny,Lx,Ly,uX,uY,kStiff,w,uMax,inds)
# Compute Total External Forces
Fx = fx
Fy = fy
return (Fx, Fy, first, inds)
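################################################################################
#
# def: hedged usage sketch (illustrative only, never called by the solver).
#      grid_Info follows the index comments above:
#      [Nx, Ny, Lx, Ly, dx, dy, supp, Nb, ds]; the grid size, time-step and
#      resting fluid below are arbitrary assumptions chosen to exercise the
#      routine, not values taken from the example itself.
#
################################################################################
def _example_external_forcing():
    Nx = Ny = 64                      # assumed grid resolution
    Lx = Ly = 1.0                     # assumed square domain
    dx = dy = Lx / Nx
    grid_Info = [Nx, Ny, Lx, Ly, dx, dy, 4, 0, 0.5*dx]
    x = np.arange(0, Lx, dx)          # Eulerian x-pts
    y = np.arange(0, Ly, dy)          # Eulerian y-pts
    uX = np.zeros((Ny, Nx))           # fluid initially at rest
    uY = np.zeros((Ny, Nx))
    # first=1 makes the routine recompute inds, so None is a safe placeholder
    Fx, Fy, first, inds = please_Compute_External_Forcing(1e-4, 0.0, x, y, grid_Info, uX, uY, 1, None)
    return Fx, Fy, inds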
######################################################################################
#
# def: computes indices for exerting forces in specified places on fluid grid
#
######################################################################################
def give_Me_Indices_To_Apply_Force(x,y,xMin,xMax,yMin,yMax):
j=0
noMinYet = 1
while noMinYet:
if ( x[j] >= xMin ):
iX_min = j
noMinYet = 0
j=j+1
j=x.size - 1
noMaxYet = 1
while noMaxYet:
if ( x[j] <= xMax ):
iX_max = j
noMaxYet = 0
j=j-1
j=0
noMinYet = 1
while noMinYet:
if ( y[j] >= yMin ):
iY_min = j
noMinYet = 0
j=j+1
j=y.size - 1
noMaxYet = 1
while noMaxYet:
if ( y[j] <= yMax ):
iY_max = j
noMaxYet = 0
j=j-1
iX_Vec = np.arange(iX_min,iX_max+1,1)
iY_Vec = np.arange(iY_min,iY_max+1,1)
n = 0
inds = np.zeros((len(iX_Vec)*len(iY_Vec),2))
for i in range(0,iX_Vec.size):
for j in range(0,iY_Vec.size):
inds[n,0] = iX_Vec[i]
inds[n,1] = iY_Vec[j]
n = n+1
return inds
######################################################################################
#
# def: computes the External Force Densities!
#
######################################################################################
def give_Me_Velocity_Target_External_Force_Density(t,dx,dy,x,y,Nx,Ny,Lx,Ly,uX,uY,kStiff,w,Umax,inds):
# t: current time in simulation
# Nx: # of nodes in x-direction on Eulerian grid
# Ny: # of nodes in y-direction on Eulerian grid
# uX: x-Velocity on Eulerian grid
# uY: y-Velocity on Eulerian grid
# kStiff: stiffness parameter
# inds: indices on the fluid grid for where to apply the arbitrary external force
fx = np.zeros((Ny,Nx)) # Initialize storage for x-force density from EXTERNAL FORCES
fy = np.zeros((Ny,Nx)) # Initialize storage for y-force density from EXTERNAL FORCES
if (t<0.01):
for n in range(0,inds.shape[0]):
i = int(inds[n,0])
j = int(inds[n,1])
uX_Tar,uY_Tar = please_Give_Target_Velocity(t,dx,dy,x,y,Lx,Ly,i,j,w,Umax)
fx[j,i] = fx[j,i] - kStiff*( uX[j,i] - uX_Tar )
fy[j,i] = fy[j,i] - kStiff*( uY[j,i] - uY_Tar )
fx_exts = fx
fy_exts = fy
return (fx_exts, fy_exts)
# MIGHT NOT NEED THESE!
#fx_exts = fx/ds^2
#fy_exts = fy/ds^2
########################################################################################################
#
# def: computes the Target Velocity Profile (MODEL DEPENDENT)
#
########################################################################################################
def please_Give_Target_Velocity(t,dx,dy,xGrid,yGrid,Lx,Ly,i,j,w,Umax):
# t: current time in simulation
# dx: x-Grid spacing
# dy: y-Grid spacing
# xGrid: vector of xPts in Eulerian grid
# yGrid: vector of yPts in Eulerian grid
# Lx: x-Length of Eulerian Grid
# Ly: y-Length of Eulerian Grid
# i: ith component in x-Grid
# j: jth component in y-Grid
# w: width of Channel
# Umax: maximum velocity
#y = yGrid(j) # y-Value considered
    uX_Tar = 0                        # No target velocity in the x-direction
    uY_Tar = -Umax * (np.tanh(20*t))  # Target velocity in the y-direction, ramped up smoothly from rest
    return (uX_Tar, uY_Tar)
| nickabattista/IB2d | pyIB2d/Examples/Rayleigh_Taylor_Instability/please_Compute_External_Forcing.py | Python | gpl-3.0 | 6,723 |
from util import utils
import os
from queue import PriorityQueue
k = None
def readGrid(filename):
filePath = os.path.join(utils.getResourcesPath(), filename)
f = open(filePath, 'r')
global k
k = int(f.readline().strip())
grid = []
for i in range(0, k):
grid.append([])
for _ in range(0, k):
grid[i].append(f.readline().strip())
return grid
def getValidNextConfigs(grid):
k = len(grid)
validNextConfigs = []
moves = []
for y in range(0, k):
if '0' in grid[y]:
x = grid[y].index('0')
if y != 0:
validNextConfigs.append(getResConfig(grid, 'UP'))
moves.append('UP')
if y != k-1:
validNextConfigs.append(getResConfig(grid, 'DOWN'))
moves.append('DOWN')
if x != 0:
validNextConfigs.append(getResConfig(grid, 'LEFT'))
moves.append('LEFT')
if x != k-1:
validNextConfigs.append(getResConfig(grid, 'RIGHT'))
moves.append('RIGHT')
return validNextConfigs, moves
def getResConfig(grid, move = 'UP'):
k = len(grid)
x = None
y = None
resConfig = []
for i in range(0, k):
resConfig.append([])
if '0' in grid[i]:
y = i
x = grid[y].index('0')
for j in range(0, k):
resConfig[i].append(grid[i][j])
if move == 'UP':
resConfig[y][x] = resConfig[y-1][x]
resConfig[y-1][x] = '0'
elif move == 'DOWN':
resConfig[y][x] = resConfig[y+1][x]
resConfig[y+1][x] = '0'
elif move == 'LEFT':
resConfig[y][x] = resConfig[y][x-1]
resConfig[y][x-1] = '0'
elif move == 'RIGHT':
resConfig[y][x] = resConfig[y][x+1]
resConfig[y][x+1] = '0'
return resConfig
#hFunction = ['misplaced', 'manhattan']
def getHeuristicCost(grid, hFunction = 'manhattan'):
k = len(grid)
cost = 0
for i in range(0, k):
for j in range(0, k):
if grid[i][j] != '0' and grid[i][j] != str(j + (k*i)):
if (hFunction == 'misplaced'):
cost += 1
elif (hFunction == 'manhattan'):
value = int(grid[i][j])
cost += abs(value//k - i) + abs(value%k - j)
return cost
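# Hedged worked example (illustrative, never called by the solver): the goal
# state stores tile str(j + k*i) at row i, column j with '0' in the top-left
# corner, so the solved 3x3 grid costs 0; swapping tiles '1' and '2' leaves
# each tile one column from its goal, giving cost 2 under both heuristics.
def _example_heuristic_cost():
    solved = [['0', '1', '2'],
              ['3', '4', '5'],
              ['6', '7', '8']]
    assert getHeuristicCost(solved, 'manhattan') == 0
    swapped = [['0', '2', '1'],
               ['3', '4', '5'],
               ['6', '7', '8']]
    assert getHeuristicCost(swapped, 'manhattan') == 2
    assert getHeuristicCost(swapped, 'misplaced') == 2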
def ucs(grid, start, end):
frontier = PriorityQueue()
costs = {}
explored = list()
path = {}
moves = {}
costs[start] = getHeuristicCost(grid)
frontier.put((costs[start], start))
while not frontier.empty():
conf = frontier.get()[1]
#found solution. Building path via backward visit
gridConf = stringToGrid(conf, k)
if getHeuristicCost(gridConf) == 0:
resmove = []
respath = [conf]
while True:
resmove.insert(0, moves[conf])
respath.insert(0, path[conf])
conf = path[conf]
if conf == start:
return resmove
explored.append(conf)
validNextConfigs, nextMoves = getValidNextConfigs(gridConf)
for i in range(len(validNextConfigs)):
nextConf = validNextConfigs[i]
move = gridToString(nextConf)
if not move in explored: #or not move in frontier:
path[move] = conf
moves[move] = nextMoves[i]
costs[move] = costs[conf] + (getHeuristicCost(nextConf) - getHeuristicCost(gridConf)) + 1
frontier.put((costs[move], move))
elif ((costs[move] > (costs[conf] + (getHeuristicCost(nextConf) - getHeuristicCost(gridConf)) + 1))):
path[move] = conf
moves[move] = nextMoves[i]
costs[move] = (costs[conf] + (getHeuristicCost(nextConf) - getHeuristicCost(gridConf)) + 1)
return None
def stringToGrid(value, k):
grid = []
for i in range(0, k):
grid.append([])
for j in range(0, k):
grid[i].append(value[i*k + j])
return grid
def gridToString(grid):
k = len(grid)
value = ''
for i in range(0, k):
for j in range(0, k):
value += grid[i][j]
return value
grid = readGrid("hackerrank/8Puzzle.txt")
end = readGrid("hackerrank/8PuzzleEnd.txt")
print('-'*10)
path = ucs(grid, gridToString(grid), gridToString(end))
print(len(path))
# for res in path:
# grid = stringToGrid(res, k)
# for row in grid:
# print(row)
# print('-'*10)
| 5agado/intro-ai | src/test/nPuzzle.py | Python | apache-2.0 | 4,709 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to generate symbols for a binary suitable for breakpad.
Currently, the tool only supports Linux, Android, and Mac. Support for other
platforms is planned.
"""
from __future__ import print_function
import collections
import errno
import glob
import multiprocessing
import optparse
import os
import re
import shutil
import six.moves.queue
import subprocess
import sys
import threading
import traceback
CONCURRENT_TASKS=multiprocessing.cpu_count()
if sys.platform == 'win32':
# TODO(crbug.com/1190269) - we can't use more than 56
# cores on Windows or Python3 may hang.
CONCURRENT_TASKS = min(CONCURRENT_TASKS, 56)
# The BINARY_INFO tuple describes a binary as dump_syms identifies it.
BINARY_INFO = collections.namedtuple('BINARY_INFO',
['platform', 'arch', 'hash', 'name'])
def GetDumpSymsBinary(build_dir=None):
"""Returns the path to the dump_syms binary."""
DUMP_SYMS = 'dump_syms'
dump_syms_bin = os.path.join(os.path.expanduser(build_dir), DUMP_SYMS)
if not os.access(dump_syms_bin, os.X_OK):
print('Cannot find %s.' % dump_syms_bin)
return None
return dump_syms_bin
def Resolve(path, exe_path, loader_path, rpaths):
"""Resolve a dyld path.
@executable_path is replaced with |exe_path|
@loader_path is replaced with |loader_path|
@rpath is replaced with the first path in |rpaths| where the referenced file
is found
"""
path = path.replace('@loader_path', loader_path)
path = path.replace('@executable_path', exe_path)
if path.find('@rpath') != -1:
for rpath in rpaths:
new_path = path.replace('@rpath', rpath)
if os.access(new_path, os.X_OK):
return new_path
return ''
return path
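# Hedged illustration of the substitutions above (all paths are hypothetical;
# the @rpath branch is filesystem-dependent because it only returns a
# candidate that exists and is executable). Never called by the tool.
def _ExampleResolve():
  # @loader_path and @executable_path are plain string substitutions.
  assert Resolve('@loader_path/libbar.dylib', '/exe', '/libs', []) == '/libs/libbar.dylib'
  assert Resolve('@executable_path/libfoo.dylib', '/exe', '/libs', []) == '/exe/libfoo.dylib'
  # @rpath candidates are tried in order; with no existing match the result
  # falls through to the empty string.
  assert Resolve('@rpath/libbaz.dylib', '/exe', '/exe', ['/hypothetical/missing']) == ''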
def GetSharedLibraryDependenciesLinux(binary):
"""Return absolute paths to all shared library dependencies of the binary.
This implementation assumes that we're running on a Linux system."""
ldd = subprocess.check_output(['ldd', binary]).decode('utf-8')
  lib_re = re.compile(r'\t.* => (.+) \(.*\)$')
result = []
for line in ldd.splitlines():
m = lib_re.match(line)
if m:
result.append(os.path.abspath(m.group(1)))
return result
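# Hedged sketch of the ldd line shape the regex above expects; the sample line
# is made up and real ldd output varies by system. Never called by the tool.
def _ExampleLddLineParse():
  lib_re = re.compile(r'\t.* => (.+) \(.*\)$')
  sample = '\tlibfoo.so.1 => /opt/build/libfoo.so.1 (0x00007f0000000000)'
  assert lib_re.match(sample).group(1) == '/opt/build/libfoo.so.1'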
def _GetSharedLibraryDependenciesAndroidOrChromeOS(binary):
"""GetSharedLibraryDependencies* suitable for Android or ChromeOS.
Both assume that the host is Linux-based, but the binary being symbolized is
being run on a device with potentially different architectures. Unlike ldd,
readelf plays nice with mixed host/device architectures (e.g. x86-64 host,
arm64 device), so use that.
"""
readelf = subprocess.check_output(['readelf', '-d', binary]).decode('utf-8')
  lib_re = re.compile(r'Shared library: \[(.+)\]$')
result = []
binary_path = os.path.dirname(os.path.abspath(binary))
for line in readelf.splitlines():
m = lib_re.search(line)
if m:
lib = os.path.join(binary_path, m.group(1))
if os.access(lib, os.X_OK):
result.append(lib)
return result
def GetSharedLibraryDependenciesAndroid(binary):
"""Return absolute paths to all shared library dependencies of the binary.
This implementation assumes that we're running on a Linux system, but
compiled for Android."""
return _GetSharedLibraryDependenciesAndroidOrChromeOS(binary)
def GetDeveloperDirMac():
"""Finds a good DEVELOPER_DIR value to run Mac dev tools.
It checks the existing DEVELOPER_DIR and `xcode-select -p` and uses
one of those if the folder exists, and falls back to one of the
existing system folders with dev tools.
Returns:
(string) path to assign to DEVELOPER_DIR env var.
"""
candidate_paths = []
if 'DEVELOPER_DIR' in os.environ:
candidate_paths.append(os.environ['DEVELOPER_DIR'])
candidate_paths.extend([
subprocess.check_output(['xcode-select', '-p']).decode('utf-8').strip(),
# Most Mac 10.1[0-2] bots have at least one Xcode installed.
'/Applications/Xcode.app',
'/Applications/Xcode9.0.app',
'/Applications/Xcode8.0.app',
# Mac 10.13 bots don't have any Xcode installed, but have CLI tools as a
# temporary workaround.
'/Library/Developer/CommandLineTools',
])
for path in candidate_paths:
if os.path.exists(path):
return path
print('WARNING: no value found for DEVELOPER_DIR. Some commands may fail.')
def GetSharedLibraryDependenciesMac(binary, exe_path):
"""Return absolute paths to all shared library dependencies of the binary.
This implementation assumes that we're running on a Mac system."""
# realpath() serves two purposes:
# 1. If an executable is linked against a framework, it links against
# Framework.framework/Framework, which is a symlink to
# Framework.framework/Framework/Versions/A/Framework. rpaths are relative
# to the real location, so resolving the symlink is important.
# 2. It converts binary to an absolute path. If binary is just
# "foo.dylib" in the current directory, dirname() would return an empty
# string, causing "@loader_path/foo" to incorrectly expand to "/foo".
loader_path = os.path.dirname(os.path.realpath(binary))
env = os.environ.copy()
SRC_ROOT_PATH = os.path.join(os.path.dirname(__file__), '../../../..')
hermetic_otool_path = os.path.join(
SRC_ROOT_PATH, 'build', 'mac_files', 'xcode_binaries', 'Contents',
'Developer', 'Toolchains', 'XcodeDefault.xctoolchain', 'usr', 'bin',
'otool')
if os.path.exists(hermetic_otool_path):
otool_path = hermetic_otool_path
else:
developer_dir = GetDeveloperDirMac()
if developer_dir:
env['DEVELOPER_DIR'] = developer_dir
otool_path = 'otool'
otool = subprocess.check_output(
[otool_path, '-lm', binary], env=env).decode('utf-8').splitlines()
rpaths = []
dylib_id = None
for idx, line in enumerate(otool):
if line.find('cmd LC_RPATH') != -1:
      m = re.match(r' *path (.*) \(offset .*\)$', otool[idx+2])
rpath = m.group(1)
rpath = rpath.replace('@loader_path', loader_path)
rpath = rpath.replace('@executable_path', exe_path)
rpaths.append(rpath)
elif line.find('cmd LC_ID_DYLIB') != -1:
      m = re.match(r' *name (.*) \(offset .*\)$', otool[idx+2])
dylib_id = m.group(1)
# `man dyld` says that @rpath is resolved against a stack of LC_RPATHs from
# all executable images leading to the load of the current module. This is
# intentionally not implemented here, since we require that every .dylib
# contains all the rpaths it needs on its own, without relying on rpaths of
# the loading executables.
otool = subprocess.check_output(
[otool_path, '-Lm', binary], env=env).decode('utf-8').splitlines()
  lib_re = re.compile(r'\t(.*) \(compatibility .*\)$')
deps = []
for line in otool:
m = lib_re.match(line)
if m:
# For frameworks and shared libraries, `otool -L` prints the LC_ID_DYLIB
# as the first line. Filter that out.
if m.group(1) == dylib_id:
continue
dep = Resolve(m.group(1), exe_path, loader_path, rpaths)
if dep:
deps.append(os.path.normpath(dep))
else:
print((
'ERROR: failed to resolve %s, exe_path %s, loader_path %s, '
'rpaths %s' % (m.group(1), exe_path, loader_path,
', '.join(rpaths))), file=sys.stderr)
sys.exit(1)
return deps
def GetSharedLibraryDependenciesChromeOS(binary):
"""Return absolute paths to all shared library dependencies of the binary.
This implementation assumes that we're running on a Linux system, but
compiled for ChromeOS."""
return _GetSharedLibraryDependenciesAndroidOrChromeOS(binary)
def GetSharedLibraryDependencies(options, binary, exe_path):
"""Return absolute paths to all shared library dependencies of the binary."""
deps = []
if options.platform.startswith('linux'):
deps = GetSharedLibraryDependenciesLinux(binary)
elif options.platform == 'android':
deps = GetSharedLibraryDependenciesAndroid(binary)
elif options.platform == 'darwin':
deps = GetSharedLibraryDependenciesMac(binary, exe_path)
elif options.platform == 'chromeos':
deps = GetSharedLibraryDependenciesChromeOS(binary)
else:
print("Platform not supported.")
sys.exit(1)
result = []
build_dir = os.path.abspath(options.build_dir)
for dep in deps:
if (os.access(dep, os.X_OK) and
os.path.abspath(os.path.dirname(dep)).startswith(build_dir)):
result.append(dep)
return result
def GetTransitiveDependencies(options):
"""Return absolute paths to the transitive closure of all shared library
dependencies of the binary, along with the binary itself."""
binary = os.path.abspath(options.binary)
exe_path = os.path.dirname(binary)
if options.platform.startswith('linux'):
# 'ldd' returns all transitive dependencies for us.
deps = set(GetSharedLibraryDependencies(options, binary, exe_path))
deps.add(binary)
return list(deps)
elif (options.platform == 'darwin' or options.platform == 'android' or
options.platform == 'chromeos'):
binaries = set([binary])
queue = [binary]
while queue:
deps = GetSharedLibraryDependencies(options, queue.pop(0), exe_path)
new_deps = set(deps) - binaries
binaries |= new_deps
queue.extend(list(new_deps))
return binaries
print("Platform not supported.")
sys.exit(1)
def mkdir_p(path):
"""Simulates mkdir -p."""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def GetBinaryInfoFromHeaderInfo(header_info):
"""Given a standard symbol header information line, returns BINARY_INFO."""
# header info is of the form "MODULE $PLATFORM $ARCH $HASH $BINARY"
info_split = header_info.strip().split(' ', 4)
if len(info_split) != 5 or info_split[0] != 'MODULE':
return None
return BINARY_INFO(*info_split[1:])
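# Hedged worked example of the MODULE header parsing above; the hash and
# binary name are made up. Never called by the tool.
def _ExampleBinaryInfo():
  info = GetBinaryInfoFromHeaderInfo(
      'MODULE Linux x86_64 A1B2C3D4E5F60718293A4B5C6D7E8F90A chrome')
  assert info == BINARY_INFO(platform='Linux', arch='x86_64',
                             hash='A1B2C3D4E5F60718293A4B5C6D7E8F90A',
                             name='chrome')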
def CreateSymbolDir(options, output_dir, relative_hash_dir):
"""Create the directory to store breakpad symbols in. On Android/Linux, we
also create a symlink in case the hash in the binary is missing."""
mkdir_p(output_dir)
if options.platform == 'android' or options.platform.startswith('linux'):
try:
os.symlink(relative_hash_dir, os.path.join(os.path.dirname(output_dir),
'000000000000000000000000000000000'))
except:
pass
def GenerateSymbols(options, binaries):
"""Dumps the symbols of binary and places them in the given directory."""
queue = six.moves.queue.Queue()
exceptions = []
print_lock = threading.Lock()
exceptions_lock = threading.Lock()
def _Worker():
dump_syms = GetDumpSymsBinary(options.build_dir)
while True:
try:
should_dump_syms = True
reason = "no reason"
binary = queue.get()
run_once = True
while run_once:
run_once = False
if not dump_syms:
should_dump_syms = False
reason = "Could not locate dump_syms executable."
break
dump_syms_output = subprocess.check_output(
[dump_syms, '-i', binary]).decode('utf-8')
header_info = dump_syms_output.splitlines()[0]
binary_info = GetBinaryInfoFromHeaderInfo(header_info)
if not binary_info:
should_dump_syms = False
reason = "Could not obtain binary information."
break
# See if the output file already exists.
output_dir = os.path.join(options.symbols_dir, binary_info.name,
binary_info.hash)
output_path = os.path.join(output_dir, binary_info.name + '.sym')
if os.path.isfile(output_path):
should_dump_syms = False
reason = "Symbol file already found."
break
# See if there is a symbol file already found next to the binary
potential_symbol_files = glob.glob('%s.breakpad*' % binary)
for potential_symbol_file in potential_symbol_files:
with open(potential_symbol_file, 'rt') as f:
symbol_info = GetBinaryInfoFromHeaderInfo(f.readline())
if symbol_info == binary_info:
CreateSymbolDir(options, output_dir, binary_info.hash)
shutil.copyfile(potential_symbol_file, output_path)
should_dump_syms = False
reason = "Found local symbol file."
break
if not should_dump_syms:
if options.verbose:
with print_lock:
print("Skipping %s (%s)" % (binary, reason))
continue
if options.verbose:
with print_lock:
print("Generating symbols for %s" % binary)
CreateSymbolDir(options, output_dir, binary_info.hash)
with open(output_path, 'wb') as f:
subprocess.check_call([dump_syms, '-r', binary], stdout=f)
except Exception as e:
with exceptions_lock:
exceptions.append(traceback.format_exc())
finally:
queue.task_done()
for binary in binaries:
queue.put(binary)
for _ in range(options.jobs):
t = threading.Thread(target=_Worker)
t.daemon = True
t.start()
queue.join()
if exceptions:
exception_str = ('One or more exceptions occurred while generating '
'symbols:\n')
exception_str += '\n'.join(exceptions)
raise Exception(exception_str)
def main():
parser = optparse.OptionParser()
parser.add_option('', '--build-dir', default='',
help='The build output directory.')
parser.add_option('', '--symbols-dir', default='',
help='The directory where to write the symbols file.')
parser.add_option('', '--binary', default='',
help='The path of the binary to generate symbols for.')
parser.add_option('', '--clear', default=False, action='store_true',
help='Clear the symbols directory before writing new '
'symbols.')
parser.add_option('-j', '--jobs', default=CONCURRENT_TASKS, action='store',
type='int', help='Number of parallel tasks to run.')
parser.add_option('-v', '--verbose', action='store_true',
help='Print verbose status output.')
parser.add_option('', '--platform', default=sys.platform,
help='Target platform of the binary.')
(options, _) = parser.parse_args()
if not options.symbols_dir:
print("Required option --symbols-dir missing.")
return 1
if not options.build_dir:
print("Required option --build-dir missing.")
return 1
if not options.binary:
print("Required option --binary missing.")
return 1
if not os.access(options.binary, os.X_OK):
print("Cannot find %s." % options.binary)
return 1
if options.clear:
try:
shutil.rmtree(options.symbols_dir)
except:
pass
if not GetDumpSymsBinary(options.build_dir):
return 1
# Build the transitive closure of all dependencies.
binaries = GetTransitiveDependencies(options)
GenerateSymbols(options, binaries)
return 0
if '__main__' == __name__:
sys.exit(main())
| nwjs/chromium.src | components/crash/content/tools/generate_breakpad_symbols.py | Python | bsd-3-clause | 15,345 |
import numpy as np
import theano
import theano.tensor as T
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from scipy.misc import imsave
def initialize_weight(n_vis, n_hid, W_name, numpy_rng, rng_dist):
if 'uniform' in rng_dist:
W = numpy_rng.uniform(low=-np.sqrt(6. / (n_vis + n_hid)),\
high=np.sqrt(6. / (n_vis + n_hid)),
size=(n_vis, n_hid)).astype(theano.config.floatX)
elif rng_dist == 'normal':
W = 0.01 * numpy_rng.normal(size=(n_vis, n_hid)).astype(theano.config.floatX)
return theano.shared(value = W, name=W_name, borrow=True)
def initalize_conv_weight(filter_shape, poolsize, numpy_rng):
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
np.prod(poolsize))
# initialize weights with random weights
W_bound = np.sqrt(6. / (fan_in + fan_out))
W = theano.shared(
np.asarray(
numpy_rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
return W
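# Hedged worked example of the fan-in / fan-out bookkeeping above (the filter
# shape and pool size are arbitrary illustrative choices):
#   filter_shape = (32, 3, 5, 5), poolsize = (2, 2)
#   fan_in  = 3 * 5 * 5              = 75
#   fan_out = 32 * (5 * 5) / (2 * 2) = 200
#   W_bound = sqrt(6 / (75 + 200))  ~= 0.148
# so each conv weight is drawn uniformly from roughly [-0.148, 0.148].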
'''decaying learning rate'''
def get_epsilon(epsilon, n, i):
return float(epsilon / ( 1 + i/float(n)))
def broadcasted_switch(a, b, c):
return T.switch(a.dimshuffle(0, 1, 'x'), b, c)
def transNorm(transM, vec):
transN = T.zeros_like(vec)
transN = T.set_subtensor(transN[:,:,0], vec[:,:,0] * transM[0][0] \
+ vec[:,:,1] * transM[1][0] + vec[:,:,2] * transM[2][0])
transN = T.set_subtensor(transN[:,:,1], vec[:,:,0] * transM[0][1] \
+ vec[:,:,1] * transM[1][1] + vec[:,:,2] * transM[2][1])
transN = T.set_subtensor(transN[:,:,2], vec[:,:,0] * transM[0][2] \
+ vec[:,:,1] * transM[1][2] + vec[:,:,2] * transM[2][2])
return transN
def drawWithMarkers(fname, im):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(im, interpolation='nearest')
ax.add_patch(plt.Rectangle((85-3, 90-3), 6, 6, color='red',
linewidth=2, fill=False))
ax.add_patch(plt.Rectangle((90-3, 50-3), 6, 6, color='red',
linewidth=2, fill=False))
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
def draw(fname, im):
imsave(fname, im)
def good_init_search():
pass
| lebek/reversible-raytracer | orbit_experiments/util.py | Python | mit | 2,653 |
# coding: utf-8
"""A tornado based nteract server."""
# Copyright (c) nteract development team.
# Distributed under the terms of the Modified BSD License.
import os
from notebook.utils import url_path_join as ujoin
from os.path import join as pjoin
from jupyter_core.paths import ENV_JUPYTER_PATH, jupyter_config_path
from . import PACKAGE_DIR
from ._version import __version__
from .config import NteractConfig
from .handlers import add_handlers
def load_jupyter_server_extension(nbapp):
"""Load the server extension.
"""
here = PACKAGE_DIR
nbapp.log.info('nteract extension loaded from %s' % here)
app_dir = here # bundle is part of the python package
web_app = nbapp.web_app
config = NteractConfig(parent=nbapp)
# original
# config.assets_dir = os.path.join(app_dir, 'static')
config.assets_dir = app_dir
config.page_url = '/nteract'
config.dev_mode = False
# Check for core mode.
core_mode = ''
if hasattr(nbapp, 'core_mode'):
core_mode = nbapp.core_mode
# Check for an app dir that is local.
if app_dir == here or app_dir == os.path.join(here, 'build'):
core_mode = True
config.settings_dir = ''
web_app.settings.setdefault('page_config_data', dict())
web_app.settings['page_config_data']['token'] = nbapp.token
web_app.settings['page_config_data']['ga_code'] = config.ga_code
web_app.settings['page_config_data']['asset_url'] = config.asset_url
web_app.settings['nteract_config'] = config
add_handlers(web_app, config)
| jdfreder/nteract | applications/jupyter-extension/nteract_on_jupyter/extension.py | Python | bsd-3-clause | 1,557 |
# -*- coding: utf-8 -*-
import base64
import werkzeug
import werkzeug.urls
from openerp import http, SUPERUSER_ID
from openerp.http import request
from openerp.tools.translate import _
class contactus(http.Controller):
def generate_google_map_url(self, street, city, city_zip, country_name):
url = "http://maps.googleapis.com/maps/api/staticmap?center=%s&sensor=false&zoom=8&size=298x298" % werkzeug.url_quote_plus(
'%s, %s %s, %s' % (street, city, city_zip, country_name)
)
return url
@http.route(['/page/website.contactus', '/page/contactus'], type='http', auth="public", website=True)
def contact(self, **kwargs):
values = {}
for field in ['description', 'partner_name', 'phone', 'contact_name', 'email_from', 'name']:
if kwargs.get(field):
values[field] = kwargs.pop(field)
values.update(kwargs=kwargs.items())
return request.website.render("website.contactus", values)
def create_lead(self, request, values, kwargs):
""" Allow to be overrided """
cr, context = request.cr, request.context
return request.registry['crm.lead'].create(cr, SUPERUSER_ID, values, context=dict(context, mail_create_nosubscribe=True))
def preRenderThanks(self, values, kwargs):
""" Allow to be overrided """
company = request.website.company_id
return {
'google_map_url': self.generate_google_map_url(company.street, company.city, company.zip, company.country_id and company.country_id.name_get()[0][1] or ''),
'_values': values,
'_kwargs': kwargs,
}
def get_contactus_response(self, values, kwargs):
values = self.preRenderThanks(values, kwargs)
return request.website.render(kwargs.get("view_callback", "website_crm.contactus_thanks"), values)
@http.route(['/crm/contactus'], type='http', auth="public", website=True)
def contactus(self, **kwargs):
def dict_to_str(title, dictvar):
ret = "\n\n%s" % title
for field in dictvar:
ret += "\n%s" % field
return ret
_TECHNICAL = ['show_info', 'view_from', 'view_callback'] # Only use for behavior, don't stock it
_BLACKLIST = ['id', 'create_uid', 'create_date', 'write_uid', 'write_date', 'user_id', 'active'] # Allow in description
_REQUIRED = ['name', 'contact_name', 'email_from', 'description'] # Could be improved including required from model
post_file = [] # List of file to add to ir_attachment once we have the ID
post_description = [] # Info to add after the message
values = {}
values['medium_id'] = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, SUPERUSER_ID, 'crm.crm_medium_website')
values['section_id'] = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, SUPERUSER_ID, 'website.salesteam_website_sales')
for field_name, field_value in kwargs.items():
if hasattr(field_value, 'filename'):
post_file.append(field_value)
elif field_name in request.registry['crm.lead']._fields and field_name not in _BLACKLIST:
values[field_name] = field_value
elif field_name not in _TECHNICAL: # allow to add some free fields or blacklisted field like ID
post_description.append("%s: %s" % (field_name, field_value))
if "name" not in kwargs and values.get("contact_name"): # if kwarg.name is empty, it's an error, we cannot copy the contact_name
values["name"] = values.get("contact_name")
# fields validation : Check that required field from model crm_lead exists
error = set(field for field in _REQUIRED if not values.get(field))
if error:
values = dict(values, error=error, kwargs=kwargs.items())
return request.website.render(kwargs.get("view_from", "website.contactus"), values)
# description is required, so it is always already initialized
if post_description:
values['description'] += dict_to_str(_("Custom Fields: "), post_description)
if kwargs.get("show_info"):
post_description = []
environ = request.httprequest.headers.environ
post_description.append("%s: %s" % ("IP", environ.get("REMOTE_ADDR")))
post_description.append("%s: %s" % ("USER_AGENT", environ.get("HTTP_USER_AGENT")))
post_description.append("%s: %s" % ("ACCEPT_LANGUAGE", environ.get("HTTP_ACCEPT_LANGUAGE")))
post_description.append("%s: %s" % ("REFERER", environ.get("HTTP_REFERER")))
values['description'] += dict_to_str(_("Environ Fields: "), post_description)
lead_id = self.create_lead(request, dict(values, user_id=False), kwargs)
values.update(lead_id=lead_id)
if lead_id:
for field_value in post_file:
attachment_value = {
'name': field_value.filename,
'res_name': field_value.filename,
'res_model': 'crm.lead',
'res_id': lead_id,
'datas': base64.encodestring(field_value.read()),
'datas_fname': field_value.filename,
}
request.registry['ir.attachment'].create(request.cr, SUPERUSER_ID, attachment_value, context=request.context)
return self.get_contactus_response(values, kwargs)
| mycodeday/crm-platform | website_crm/controllers/main.py | Python | gpl-3.0 | 5,499 |
#!/usr/bin/env python3
# Advent of Code 2016 - Day 21, Part One
import sys
import re
def swap_position(string, x, y):
string[x], string[y] = string[y], string[x]
return string
def swap_letter(string, x, y):
for i, c in enumerate(string):
if c == x:
string[i] = y
elif c == y:
string[i] = x
return string
def rotate_lr(string, n):
n = n % len(string)
return string[-n:] + string[:-n]
def rotate_position(string, x):
i = string.index(x)
n = 1 + i + (i >= 4)
return rotate_lr(string, n)
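# Hedged worked example (values follow the puzzle's published walkthrough):
# rotating 'abdec' on letter 'b' (index 1 -> 1 + 1 = 2 right rotations) gives
# 'ecabd', and rotating that on 'd' (index 4 -> 1 + 4 + 1 = 6, i.e. 1 mod 5)
# gives 'decab'. Never called by the solver.
def _example_rotate_position():
    assert rotate_position(list('abdec'), 'b') == list('ecabd')
    assert rotate_position(list('ecabd'), 'd') == list('decab')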
def reverse(string, x, y):
string[x:y+1] = string[x:y+1][::-1]
return string
def move(string, x, y):
c = string[x]
del string[x]
string.insert(y, c)
return string
def getints(string):
return map(int, re.findall(r'(\d+)', string))
def getchars(string):
z = re.findall(r'(?:^|\s)([a-z])(?:\s|$)', string)
return z
def main(argv):
if len(argv) < 2:
print("Usage: {} puzzle.txt".format(argv[0]))
return 1
with open(argv[1]) as f:
string = list('abcdefgh')
for line in f:
head, tail = line.split(maxsplit=1)
if head == 'swap':
head, tail = tail.split(maxsplit=1)
if head == 'position':
x, y = getints(tail)
string = swap_position(string, x, y)
elif head == 'letter':
x, y = getchars(tail)
string = swap_letter(string, x, y)
elif head == 'rotate':
head, tail = tail.split(maxsplit=1)
if head == 'left':
n = list(getints(tail))[0]
string = rotate_lr(string, -n)
elif head == 'right':
n = list(getints(tail))[0]
string = rotate_lr(string, n)
else:
x = getchars(tail)[0]
string = rotate_position(string, x)
elif head == 'reverse':
x, y = getints(tail)
string = reverse(string, x, y)
elif head == 'move':
x, y = getints(tail)
string = move(string, x, y)
print(''.join(string))
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mcbor/adventofcode | 2016/day21/day21-pt1.py | Python | mit | 2,337 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import gzip
from Bio.Blast import NCBIXML
import multiprocessing
import argparse
def func_os_GetAllFilesPathAndNameWithExtensionInFolder (FolderAddress, FileExtension, ProcessSubFoldersAlso = True):
    #IMPORTANT ... Extension should be like : "txt" , "a2" ... WITHOUT DOT !
FILES = [];
if ProcessSubFoldersAlso:
for root, dirs, files in os.walk(FolderAddress):
for file in files:
if file.endswith("." + FileExtension):
FILES.append(os.path.join(root, file));
return (FILES);
else:
for file in os.listdir(FolderAddress):
if file.endswith("." + FileExtension): #".txt" ;
FILES.append(FolderAddress + file);
return (FILES) ;
def func_GetUniProtID_FromGI (hit_id , EVEXDBcon):
#hit_id: gi|18202836|sp|Q9CQV8.3|1433B_MOUSE
GI = hit_id.split ("|")[1]
SQLSTR = "select symbol from map_gi2uniprot where gi = " + GI + " ;";
cur = EVEXDBcon.cursor()
cur.execute (SQLSTR)
rows = cur.fetchall()
cur.close()
if len(rows) == 0:
return set()
else:
RES = set();
for row in rows:
RES.add (row[0])
return RES
def func_GetUniProtID_ACCESSION (hit_id , EVEXDBcon):
#hit_id: gi|18202836|sp|Q9CQV8.3|1433B_MOUSE
ACC = hit_id.split ("|")[3].split(".")[0]
SQLSTR = "select symbol from map_PID2uniprot where PID = '" + ACC + "' ;"
cur = EVEXDBcon.cursor()
cur.execute (SQLSTR)
rows = cur.fetchall()
cur.close()
if len(rows) == 0:
return set()
else:
RES = set();
for row in rows:
RES.add (row[0])
return RES
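# Hedged illustration (no database connection needed): both lookups above
# first split the pipe-delimited BLAST hit id; for the sample id quoted in the
# comments, the GI number is field 1 and the accession is field 3 with its
# version suffix stripped. Never called by the pipeline.
def _example_parse_hit_id():
    hit_id = "gi|18202836|sp|Q9CQV8.3|1433B_MOUSE"
    assert hit_id.split("|")[1] == "18202836"              # GI number
    assert hit_id.split("|")[3].split(".")[0] == "Q9CQV8"  # accession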
def process_one_input_file (input_file , OUT_DIR):
output_file = OUT_DIR + input_file.split("/")[-1].split(".xml.gz")[0] + ".features_tsv.gz" ;
errlog_file = OUT_DIR + input_file.split("/")[-1].split(".xml.gz")[0] + ".errorlog.txt" ;
print("processing : " + input_file)
print("creating output : " + output_file)
print("creating errorlog: " + errlog_file)
inp_file_handle = gzip.open(input_file , 'rt')
out_file_handle = gzip.open(output_file, 'wt')
log_file_handle = open(errlog_file, "wt")
all_records = NCBIXML.parse(inp_file_handle)
cnt = 0 ;
for RECORD in all_records:
for alignment in RECORD.alignments:
for hsp in alignment.hsps:
#cnt += 1
L = []
#if cnt % 1000 == 0:
# print cnt ;
#Features : For each RECORD (Generally we have only 1 record)
if len (RECORD.query.split ())>1:
# L.append (input_file.split("/")[-1].split(".")[0]);
L.append (RECORD.query.split ()[0]);#<BlastOutput_query-def>T96060004884 DPY30_HUMAN</BlastOutput_query-def> #UniprotID = DPY30_HUMAN
else:
L.append (RECORD.query);
L.append (RECORD.query_id);#<BlastOutput_query-ID>90843</BlastOutput_query-ID>
L.append (RECORD.query_length)#<Iteration_query-len>99</Iteration_query-len>
L.append (RECORD.query_letters);#<BlastOutput_query-len>99</BlastOutput_query-len>
#Features : For each Alignment : EACH <Hit> ... and usually each <HIT> may have multiple <Hsp> ... we usually have 50 HSP
# PARAM_UNIProtID_FromGI = func_GetUniProtID_FromGI (alignment.hit_id , EVEXDBcon)
# PARAM_UNIProtID_FromACC = func_GetUniProtID_ACCESSION (alignment.hit_id , EVEXDBcon)
# #hit_id: gi|18202836|sp|Q9CQV8.3|1433B_MOUSE
PARAM_UNIProtID_FromXML = set()
tmp = alignment.hit_id.split("|")
if len (tmp) == 3:
PARAM_UNIProtID_FromXML.add (tmp[2])
PARAM_UNIProtID = PARAM_UNIProtID_FromXML
if len(PARAM_UNIProtID) == 0:
ErrStr = RECORD.query_id + "\t" + alignment.hit_id + "\t" + "GI: " + alignment.hit_id.split ("|")[1] + "\n" ;
log_file_handle.write (ErrStr)
continue
else:
PARAM_UNIProtID = ",".join (PARAM_UNIProtID)
L.append (PARAM_UNIProtID);# --> GI --> UniprotID
L.append (alignment.accession);#<Hit_accession>XP_005815176</Hit_accession>
L.append (alignment.length);#<Hit_len>104</Hit_len>
#L.append (alignment.hit_id);#<Hit_id>gi|551527403|ref|XP_005815176.1|</Hit_id>
#L.append (alignment.hit_def);#<Hit_def>PREDICTED: protein dpy-30 homolog [Xiphophorus maculatus]</Hit_def>
#Features : For each hsp : <hsp>
L.append (hsp.align_length);#<Hsp_align-len>98</Hsp_align-len>
L.append (hsp.bits) ;#<Hsp_bit-score>160.614</Hsp_bit-score>
L.append (hsp.score);#<Hsp_score>405</Hsp_score>
L.append (hsp.expect);# EVALUE : <Hsp_evalue>1.74162e-48</Hsp_evalue>
L.append (hsp.query_start);#<Hsp_query-from>2</Hsp_query-from>
L.append (hsp.query_end);#<Hsp_query-to>99</Hsp_query-to>
L.append (hsp.sbjct_start);#<Hsp_hit-from>7</Hsp_hit-from>
L.append (hsp.sbjct_end);#<Hsp_hit-to>104</Hsp_hit-to>
L.append (hsp.frame[0]);#<Hsp_query-frame>0</Hsp_query-frame>
L.append (hsp.frame[1]);#<Hsp_hit-frame>0</Hsp_hit-frame>
L.append (hsp.identities);#<Hsp_identity>74</Hsp_identity>
L.append (hsp.positives);#<Hsp_positive>92</Hsp_positive>
L.append (hsp.gaps);#<Hsp_gaps>0</Hsp_gaps>
out_file_handle.write ("\t".join(str(x) for x in L) + "\n")
inp_file_handle.close()
out_file_handle.close()
log_file_handle.close()
def worker_thread (input_file):
args = argument_parser()
OUT_DIR = args.out_folder
print("PROCESSING FILE: " , input_file)
process_one_input_file (input_file , OUT_DIR)
print("PROCESSING FILE: " , input_file , " ... DONE.")
def argument_parser():
parser = argparse.ArgumentParser(description="processXML output from interproscan")
parser.add_argument("-i", "--in_folder", type=str, help="full path to input file -- xml.gz file from deltaBlast run")
parser.add_argument("-o", "--out_folder", type=str, help="full path to out folder file from deltaBlast run")
args = parser.parse_args()
return args
if __name__== "__main__":
args = argument_parser()
INP_FILES = func_os_GetAllFilesPathAndNameWithExtensionInFolder(args.in_folder, "gz")
p = multiprocessing.Pool(8)
p.map(worker_thread, INP_FILES)
p.close()
p.join()
| TurkuNLP/CAFA3 | sequence_features/deltaBLAST_results_FeatureGenerator.py | Python | lgpl-3.0 | 7,031 |
# orm/scoping.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy import exc as sa_exc
from sqlalchemy.util import ScopedRegistry, ThreadLocalRegistry, warn
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm.session import Session
__all__ = ['ScopedSession']
class ScopedSession(object):
"""Provides thread-local management of Sessions.
Typical invocation is via the :func:`.scoped_session`
function::
Session = scoped_session(sessionmaker())
The internal registry is accessible,
and by default is an instance of :class:`.ThreadLocalRegistry`.
See also: :ref:`unitofwork_contextual`.
"""
def __init__(self, session_factory, scopefunc=None):
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
def __call__(self, **kwargs):
if kwargs:
scope = kwargs.pop('scope', False)
if scope is not None:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified.")
else:
sess = self.session_factory(**kwargs)
self.registry.set(sess)
return sess
else:
return self.session_factory(**kwargs)
else:
return self.registry()
def remove(self):
"""Dispose of the current contextual session."""
if self.registry.has():
self.registry().close()
self.registry.clear()
def configure(self, **kwargs):
"""reconfigure the sessionmaker used by this ScopedSession."""
if self.registry.has():
warn('At least one scoped session is already present. '
' configure() can not affect sessions that have '
'already been created.')
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a `Query` object
against the class when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
def instrument(name):
def do(self, *args, **kwargs):
return getattr(self.registry(), name)(*args, **kwargs)
return do
for meth in Session.public_methods:
setattr(ScopedSession, meth, instrument(meth))
def makeprop(name):
def set(self, attr):
setattr(self.registry(), name, attr)
def get(self):
return getattr(self.registry(), name)
return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
'is_active', 'autoflush', 'no_autoflush'):
setattr(ScopedSession, prop, makeprop(prop))
def clslevel(name):
def do(cls, *args, **kwargs):
return getattr(Session, name)(*args, **kwargs)
return classmethod(do)
for prop in ('close_all', 'object_session', 'identity_key'):
setattr(ScopedSession, prop, clslevel(prop))
| aurofable/medhack-server | venv/lib/python2.7/site-packages/sqlalchemy/orm/scoping.py | Python | mit | 4,600 |
''' This is the library for getting DotA2 insight information.
Problem:
there was no way to get DotA2 internal data about heroes, their abilities...
in a form suitable for further work.
Solution:
this library allows you to access all the data in the in-game files.
But information about a single hero does not have much use on its own, so
there is a way to get stats of selected heroes or get information about a
certain match.
'''
from atod.meta import meta_info
from atod.models.interfaces import Member, Group
from atod.models.ability import Ability
from atod.models.abilities import Abilities
from atod.models.hero import Hero
from atod.models.heroes import Heroes
from atod.models.match import Match
from atod.utils.pick import get_recommendations
# from atod.utils import dota_api
| gasabr/AtoD | atod/__init__.py | Python | mit | 806 |
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^$', 'blog.views.home', name='home'),
url(r'^ver_post/(?P<id_post>[0-9]+)/$', 'blog.views.ver_post', name='vermipost'),
url(r'^contactame/$', 'blog.views.contact', name='contactame'),
url(r'^save_message/$', 'blog.views.save_message', name='save_message'),
url(r'^calculator/$', 'blog.views.calculadora', name='calculadora'),
url(r'^cambio/$', 'blog.views.cambio_moneda', name='cambio'),
url(r'^cronometro/$', 'blog.views.cronometro', name='cronometro'),
url(r'^galeria/$', 'blog.views.galeria', name='galeria'),
url(r'^formulario/$', 'blog.views.formulario', name='formulario'),
url(r'^curriculum/$', 'blog.views.curriculum', name='curriculum'),
url(r'^contacto/$', 'blog.views.contacto', name='contacto')
)
| RodrigoToledoA/Blog | blog/urls.py | Python | gpl-2.0 | 1,094 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`ironic.tests.unit` -- ironic unit tests
=====================================================
.. automodule:: ironic.tests.unit
:platform: Unix
"""
# TODO(deva): move eventlet imports to ironic.__init__ once we move to PBR
import eventlet
eventlet.monkey_patch(os=False)
| hpproliant/ironic | ironic/tests/unit/__init__.py | Python | apache-2.0 | 1,020 |
import os
import sys
import pyttsx
import random
from data import *
EXIT_TAG = 'n'
class CTrain(object):
def __init__(self):
self._eng = pyttsx.init()
def pre(self):
print "*"*10,"DICTATE NUMBER TRAING", "*"*10
name = raw_input("Please enter your name: ")
data = CData(name).load()
if data is not None:
self._data = data
print "See you again ", name, ", your score is followings:"
self._data.showSt()
self._data.showDaySt()
else:
self._data = CData(name)
print "Welcome new challenger", name
print "You will start on level", self._data.level
return
def aft(self, doCnt):
self._data.save()
print "Bye %s, finish [%3d] , your score is followings:"%(self._data.name, doCnt)
self._data.showDetail(doCnt)
return
def run(self):
IsCon = True
idx = 1
while IsCon:
lvl = self._data.level
k = raw_input("press any key to continue, press n for exit: ")
if k.lower().find(EXIT_TAG) >= 0:
print "End training..."
                IsCon = False
break
print "\n[%3d]"%( idx,), " now level", lvl,", Please listening and enter what you heard\n"
nums = self.genNum(lvl)
self.readNum(nums)
d = raw_input()
ans,lvl = self._data.score(d, nums)
if ans:
print "SUCC"
else:
print "FAIL: ", nums
idx += 1
return idx-1
def genNum(self, lvl):
s = ""
for _ in range(lvl):
d = random.randint(0,9)
s += str(d)
return s
def readNum(self, nums):
for d in nums:
self._eng.say(d)
self._eng.runAndWait()
return
def main():
train = CTrain()
train.pre()
doCnt = train.run()
train.aft(doCnt)
if __name__ == "__main__":
main()
| anferneeHu/training | dictate_num/train.py | Python | bsd-2-clause | 2,041 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-02-02 12:18
from __future__ import unicode_literals
from django.contrib.auth.models import Permission
from django.db import migrations
def delete_old_comment_permission(apps, schema_editor):
"""
Deletes the old 'can_see_and_manage_comments' permission which is
split up into two seperate permissions.
"""
perm = Permission.objects.filter(codename="can_see_and_manage_comments")
if len(perm):
perm = perm.get()
# Save content_type for manual creation of new permissions.
content_type = perm.content_type
# Save groups. list() is necessary to evaluate the database query right now.
groups = list(perm.group_set.all())
# Delete permission
perm.delete()
# Create new permission
perm_see = Permission.objects.create(
codename="can_see_comments",
name="Can see comments",
content_type=content_type,
)
perm_manage = Permission.objects.create(
codename="can_manage_comments",
name="Can manage comments",
content_type=content_type,
)
for group in groups:
group.permissions.add(perm_see)
group.permissions.add(perm_manage)
group.save()
class Migration(migrations.Migration):
dependencies = [("motions", "0004_motionchangerecommendation_other_description")]
operations = [
migrations.AlterModelOptions(
name="motion",
options={
"default_permissions": (),
"ordering": ("identifier",),
"permissions": (
("can_see", "Can see motions"),
("can_create", "Can create motions"),
("can_support", "Can support motions"),
("can_see_comments", "Can see comments"),
("can_manage_comments", "Can manage comments"),
("can_manage", "Can manage motions"),
),
"verbose_name": "Motion",
},
),
migrations.RunPython(delete_old_comment_permission),
]
| CatoTH/OpenSlides | server/openslides/motions/migrations/0005_auto_20180202_1318.py | Python | mit | 2,199 |
#!/usr/bin/python3
import os, os.path, re, zipfile, json, shutil
def get_files_to_zip():
#Exclude git stuff, build scripts etc.
exclude = [
r'\.(py|sh|pem)$', #file endings
r'(\\|/)\.', #hidden files
r'package\.json|icon\.html|updates\.json|visma\.js', #file names
r'(\\|/)(promo|unittest|build|web-ext-artifacts|node_modules)(\\|/)' #folders
]
zippable_files = []
for root, folders, files in os.walk('.'):
#print(root)
for f in files:
file = os.path.join(root,f)
if not any(re.search(p, file) for p in exclude):
zippable_files.append(file)
return zippable_files
def create_addon(files, browser):
output_folder = 'build'
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
output_dir = os.path.join(output_folder, 'testdata-%s' % (browser))
if os.path.isdir(output_dir):
os.rename(output_dir, output_dir+"_temp")
shutil.rmtree(output_dir+"_temp")
os.mkdir(output_dir)
print('')
print('**** Creating addon for %s ****' % browser)
for f in files:
print('Adding', f)
of = os.path.join(output_dir, f)
os.makedirs(os.path.dirname(of), exist_ok=True)
if f.endswith('manifest.json'):
with open(f) as inF:
manifest = json.load(inF)
if browser != 'firefox':
del manifest['applications'] #Firefox specific, and causes warnings in other browsers...
#if browser == 'firefox':
# del manifest['background']['persistent'] #Firefox chokes on this, is always persistent anyway
with open(of, "w") as outF:
outF.write(json.dumps(manifest, indent=2))
else:
shutil.copy(f, of)
if browser == "chrome":
shutil.make_archive(output_dir, "zip", output_dir)
#if browser == 'opera':
#Create .nex
# os.system('./nex-build.sh %s %s %s' % (output_file, output_file.replace('.zip', '.nex'), cert))
if __name__ == '__main__':
#Make sure we can run this from anywhere
folder = os.path.dirname(os.path.realpath(__file__))
os.chdir(folder)
files = get_files_to_zip()
print('******* EXTENSION BUILD SCRIPT *******')
print('')
create_addon(files, 'chrome')
#create_addon(files, 'opera')
create_addon(files, 'firefox')
| samiljan/testdata | build.py | Python | mit | 2,510 |
# -*- coding: utf-8 -*-
#
# poster documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 28 17:00:50 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'poster'
copyright = '2008-2011, Chris AtLee'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import poster
version = ".".join(str(x) for x in poster.version)
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html', 'ads.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'posterdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'poster.tex', 'poster Documentation', 'Chris AtLee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| EvanDarwin/poster3 | docs/conf.py | Python | mit | 5,834 |
#!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: Cheetara Rx
# Generated: Mon Dec 17 21:03:15 2012
##################################################
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import uhd
from gnuradio import window
from gnuradio.eng_option import eng_option
from gnuradio.gr import firdes
from gnuradio.wxgui import fftsink2
from grc_gnuradio import wxgui as grc_wxgui
from optparse import OptionParser
import wx
class cheetara_rx(grc_wxgui.top_block_gui):
def __init__(self):
grc_wxgui.top_block_gui.__init__(self, title="Cheetara Rx")
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 125e3
##################################################
# Blocks
##################################################
self.wxgui_fftsink2_0 = fftsink2.fft_sink_c(
self.GetWin(),
baseband_freq=0,
y_per_div=10,
y_divs=10,
ref_level=0,
ref_scale=2.0,
sample_rate=samp_rate,
fft_size=1024,
fft_rate=15,
average=False,
avg_alpha=None,
title="FFT Plot",
peak_hold=False,
)
self.Add(self.wxgui_fftsink2_0.win)
self.uhd_usrp_source_0 = uhd.usrp_source(
device_addr="serial=E8R10Z2B1",
stream_args=uhd.stream_args(
cpu_format="fc32",
channels=range(1),
),
)
self.uhd_usrp_source_0.set_samp_rate(samp_rate)
self.uhd_usrp_source_0.set_center_freq(520e6, 0)
self.uhd_usrp_source_0.set_gain(5, 0)
self.uhd_usrp_source_0.set_antenna("RX2", 0)
self.uhd_usrp_source_0.set_bandwidth(100e3, 0)
##################################################
# Connections
##################################################
self.connect((self.uhd_usrp_source_0, 0), (self.wxgui_fftsink2_0, 0))
# QT sink close method reimplementation
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.wxgui_fftsink2_0.set_sample_rate(self.samp_rate)
self.uhd_usrp_source_0.set_samp_rate(self.samp_rate)
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
tb = cheetara_rx()
tb.Run(True)
| levelrf/level_basestation | wardrive/full_test/cheetara_rx.py | Python | gpl-3.0 | 2,302 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Orgs are collections of people who make decisions
after a threshold of consensus. When new admins are
added, the org keys fundamentally change hands.
Operations involving adding or removing citizens,
however, occur under the existing keys.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from twisted.protocols.amp import Command, String, Integer
#from ...types import ObjHash, ObjSig, ObjPubKey, ObjAddress, ObjBlob
class OrgConsensusBegin(Command):
"""
# admin pubkeys, consensus requirements
"""
arguments = [(b'name', String()),
(b'port', Integer())]
response = [(b'hello', String())]
class OrgConsensusIncrement(Command):
response = []
class OrgConsensusVerify(Command):
response = []
# bio items
class OrgBorderBegin(Command): # border object [(lat,long),...], yea/nay-date
response = []
class OrgBorderIncrement(Command):
response = []
class OrgBorderVerify(Command):
response = []
class OrgManifestoBegin(Command): # manifesto (markdown), yea/nay-date
response = []
class OrgManifestoIncrement(Command):
response = []
class OrgManifestoVerify(Command):
response = []
# org couriers:
# key, ip, voters in favor, admins in favor
class OrgStaffCouriersBegin(Command):
response = []
class OrgStaffCouriersIncrement(Command):
response = []
class OrgStaffCouriersVerify(Command):
response = []
# key, ip, voters in favor, admins in favor
class OrgVolunteerCouriersBegin(Command):
response = []
class OrgVolunteerCouriersIncrement(Command):
response = []
class OrgVolunteerCouriersVerify(Command):
response = []
# memberships
class OrgMembershipBegin(Command): # membership proposal object, yea/nay-date
response = []
class OrgMembershipIncrement(Command):
response = []
class OrgMembershipVerify(Command):
response = []
class OrgProposalBegin(Command): # proposal object, yea/nay-date
response = []
class OrgProposalIncrement(Command):
response = []
class OrgProposalVerify(Command):
response = []
class OrgVoteTallyBegin(Command): # vote object, ballots cast, yea/nay-date
response = []
class OrgVoteTallyIncrement(Command):
response = []
class OrgVoteTallyVerify(Command):
response = []
class OrgPollTallyBegin(Command): # poll object, ballots answered, yea/nay-date
response = []
class OrgPollTallyIncrement(Command):
response = []
class OrgPollTallyVerify(Command):
response = []
class OrgSavesBegin(Command): # hash to save, yea/nay-date
response = []
class OrgSavesIncrement(Command):
response = []
class OrgSavesVerify(Command):
response = []
class orgResponders(object):
redis = None
neo4j = None
def __init__(self):
        # would pull Redis online
pass
@OrgConsensusBegin.responder
def ConsensusBegin(self):
pass
@OrgConsensusIncrement.responder
def ConsensusIncrement(self):
pass
@OrgConsensusVerify.responder
def ConsensusVerify(self):
pass
# bio items
    @OrgBorderBegin.responder
def BorderBegin(self): # border object [(lat,long),...], yea/nay-date
pass
@OrgBorderIncrement.responder
def BorderIncrement(self):
pass
@OrgBorderVerify.responder
def BorderVerify(self):
pass
    @OrgManifestoBegin.responder
def ManifestoBegin(self): # manifesto (markdown), yea/nay-date
pass
@OrgManifestoIncrement.responder
def ManifestoIncrement(self):
pass
@OrgManifestoVerify.responder
def ManifestoVerify(self):
pass
# org couriers:
@OrgStaffCouriersBegin.responder
def StaffCouriersBegin(self): # key, ip, voters in favor, admins in favor
pass
@OrgStaffCouriersIncrement.responder
def StaffCouriersIncrement(self):
pass
@OrgStaffCouriersVerify.responder
def StaffCouriersVerify(self):
pass
@OrgVolunteerCouriersBegin.responder
# key, ip, voters in favor, admins in favor
def VolunteerCouriersBegin(self):
pass
@OrgVolunteerCouriersIncrement.responder
def VolunteerCouriersIncrement(self):
pass
@OrgVolunteerCouriersVerify.responder
def VolunteerCouriersVerify(self):
pass
# memberships
@OrgMembershipBegin.responder
def MembershipBegin(self): # membership proposal object, yea/nay-date
pass
@OrgMembershipIncrement.responder
def MembershipIncrement(self):
pass
@OrgMembershipVerify.responder
def MembershipVerify(self):
pass
@OrgProposalBegin.responder
def ProposalBegin(self): # proposal object, yea/nay-date
pass
@OrgProposalIncrement.responder
def ProposalIncrement(self):
pass
@OrgProposalVerify.responder
def ProposalVerify(self):
pass
@OrgVoteTallyBegin.responder
def VoteTallyBegin(self): # vote object, ballots cast, yea/nay-date
pass
@OrgVoteTallyIncrement.responder
def VoteTallyIncrement(self):
pass
@OrgVoteTallyVerify.responder
def VoteTallyVerify(self):
pass
@OrgPollTallyBegin.responder
def PollTallyBegin(self): # poll object, ballots answered, yea/nay-date
pass
@OrgPollTallyIncrement.responder
def PollTallyIncrement(self):
pass
@OrgPollTallyVerify.responder
def PollTallyVerify(self):
pass
@OrgSavesBegin.responder
def SavesBegin(self): # hash to save, yea/nay-date
pass
@OrgSavesIncrement.responder
def SavesIncrement(self):
pass
@OrgSavesVerify.responder
def SavesVerify(self):
pass
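# Illustrative client-side sketch (an editorial addition, not part of the
# original module): a peer holding a connected twisted AMP protocol instance
# could invoke the OrgConsensusBegin command defined above roughly as shown.
# The org name and port are placeholder values for the example only.
def _example_begin_consensus(amp_protocol):
    """Send OrgConsensusBegin; returns a Deferred firing with the reply dict."""
    # Per the Command definition above, the reply carries a 'hello' String.
    return amp_protocol.callRemote(OrgConsensusBegin,
                                   name=b'example org', port=1234)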
| Thetoxicarcade/ac | congredi/commands/proofs/org.py | Python | gpl-3.0 | 6,036 |
"""Collection of constants fo the ``server_guardian`` app."""
# keep this in sync with server_guardian_api.constants
SERVER_STATUS = { # pragma: no cover
'OK': 'OK',
'WARNING': 'WARNING',
'DANGER': 'DANGER',
}
SERVER_STATUS_CHOICES = ( # pragma: no cover
('OK', SERVER_STATUS['OK']),
('WARNING', SERVER_STATUS['WARNING']),
('DANGER', SERVER_STATUS['DANGER']),
)
ERROR_STATUS = [SERVER_STATUS['WARNING'], SERVER_STATUS['DANGER']] # pragma: no cover # NOQA
HTML_STATUS_FALLBACK = [{ # pragma: no cover
'label': 'json_error',
'status': SERVER_STATUS['DANGER'],
'info': 'JSON response could not be decoded.'
}]
| bitmazk/django-server-guardian | server_guardian/constants.py | Python | mit | 653 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""This file contains the tests for the event storage."""
import os
import tempfile
import unittest
import zipfile
from plaso.dfwinreg import fake as dfwinreg_fake
from plaso.engine import queue
from plaso.events import text_events
from plaso.events import windows_events
from plaso.formatters import manager as formatters_manager
from plaso.formatters import mediator as formatters_mediator
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import pfilter
from plaso.lib import storage
from plaso.lib import timelib
from plaso.multi_processing import multi_process
from plaso.formatters import winreg # pylint: disable=unused-import
from plaso.serializer import protobuf_serializer
from tests import test_lib as shared_test_lib
class DummyObject(object):
"""Dummy object."""
class GroupMock(object):
"""Mock a class for grouping events together."""
def __init__(self):
"""Initializes the mock group object."""
self.groups = []
def AddGroup(
self, name, events, desc=None, first=0, last=0, color=None, cat=None):
"""Add a new group of events."""
self.groups.append((name, events, desc, first, last, color, cat))
def __iter__(self):
"""Iterator."""
for name, events, desc, first, last, color, cat in self.groups:
dummy = DummyObject()
dummy.name = name
dummy.events = events
if desc:
dummy.description = desc
if first:
dummy.first_timestamp = int(first)
if last:
dummy.last_timestamp = int(last)
if color:
dummy.color = color
if cat:
dummy.category = cat
yield dummy
class StorageFileTest(unittest.TestCase):
"""Tests for the plaso storage file."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._event_objects = self._CreateTestEventObjects()
self._formatter_mediator = formatters_mediator.FormatterMediator()
def _CreateTestEventObjects(self):
"""Creates the event objects for testing.
Returns:
A list of event objects (instances of EventObject).
"""
event_objects = []
filetime = dfwinreg_fake.Filetime()
filetime.CopyFromString(u'2012-04-20 22:38:46.929596')
values_dict = {u'Value': u'c:/Temp/evil.exe'}
event_object = windows_events.WindowsRegistryEvent(
filetime.timestamp, u'MY AutoRun key', values_dict)
event_object.parser = 'UNKNOWN'
event_objects.append(event_object)
filetime.CopyFromString(u'2012-05-02 13:43:26.929596')
values_dict = {u'Value': u'send all the exes to the other world'}
event_object = windows_events.WindowsRegistryEvent(
filetime.timestamp, u'\\HKCU\\Secret\\EvilEmpire\\Malicious_key',
values_dict)
event_object.parser = 'UNKNOWN'
event_objects.append(event_object)
filetime.CopyFromString(u'2012-04-20 16:44:46')
values_dict = {u'Value': u'run all the benign stuff'}
event_object = windows_events.WindowsRegistryEvent(
filetime.timestamp, u'\\HKCU\\Windows\\Normal', values_dict)
event_object.parser = 'UNKNOWN'
event_objects.append(event_object)
    timestamp = timelib.Timestamp.CopyFromString(u'2009-04-05 12:27:39')
text_dict = {
u'hostname': u'nomachine',
u'text': (
u'This is a line by someone not reading the log line properly. And '
u'since this log line exceeds the accepted 80 chars it will be '
u'shortened.'),
u'username': u'johndoe'}
    event_object = text_events.TextEvent(timestamp, 12, text_dict)
event_object.parser = 'UNKNOWN'
event_objects.append(event_object)
return event_objects
def testStorageWriter(self):
"""Test the storage writer."""
self.assertEqual(len(self._event_objects), 4)
# The storage writer is normally run in a separate thread.
# For the purpose of this test it has to be run in sequence,
# hence the call to WriteEventObjects after all the event objects
# have been queued up.
# TODO: add upper queue limit.
# A timeout is used to prevent the multi processing queue to close and
# stop blocking the current process.
test_queue = multi_process.MultiProcessingQueue(timeout=0.1)
test_queue_producer = queue.ItemQueueProducer(test_queue)
test_queue_producer.ProduceItems(self._event_objects)
test_queue_producer.SignalAbort()
with tempfile.NamedTemporaryFile() as temp_file:
storage_writer = storage.FileStorageWriter(test_queue, temp_file)
storage_writer.WriteEventObjects()
z_file = zipfile.ZipFile(temp_file, 'r', zipfile.ZIP_DEFLATED)
expected_z_filename_list = [
u'plaso_index.000001', u'plaso_meta.000001', u'plaso_proto.000001',
u'plaso_timestamps.000001', u'serializer.txt']
z_filename_list = sorted(z_file.namelist())
self.assertEqual(len(z_filename_list), 5)
self.assertEqual(z_filename_list, expected_z_filename_list)
def testStorage(self):
"""Test the storage object."""
event_objects = []
timestamps = []
group_mock = GroupMock()
tags = []
tags_mock = []
groups = []
group_events = []
same_events = []
serializer = protobuf_serializer.ProtobufEventObjectSerializer
with shared_test_lib.TempDirectory() as dirname:
temp_file = os.path.join(dirname, 'plaso.db')
store = storage.StorageFile(temp_file)
store.AddEventObjects(self._event_objects)
# Add tagging.
tag_1 = event.EventTag()
tag_1.store_index = 0
tag_1.store_number = 1
tag_1.comment = 'My comment'
tag_1.color = 'blue'
tags_mock.append(tag_1)
tag_2 = event.EventTag()
tag_2.store_index = 1
tag_2.store_number = 1
tag_2.tags = ['Malware']
tag_2.color = 'red'
tags_mock.append(tag_2)
tag_3 = event.EventTag()
tag_3.store_number = 1
tag_3.store_index = 2
tag_3.comment = 'This is interesting'
tag_3.tags = ['Malware', 'Benign']
tag_3.color = 'red'
tags_mock.append(tag_3)
store.StoreTagging(tags_mock)
# Add additional tagging, second round.
tag_4 = event.EventTag()
tag_4.store_index = 1
tag_4.store_number = 1
tag_4.tags = ['Interesting']
store.StoreTagging([tag_4])
group_mock.AddGroup(
'Malicious', [(1, 1), (1, 2)], desc='Events that are malicious',
color='red', first=1334940286000000, last=1334961526929596,
cat='Malware')
store.StoreGrouping(group_mock)
store.Close()
read_store = storage.StorageFile(temp_file, read_only=True)
self.assertTrue(read_store.HasTagging())
self.assertTrue(read_store.HasGrouping())
for event_object in read_store.GetEntries(1):
event_objects.append(event_object)
timestamps.append(event_object.timestamp)
if event_object.data_type == 'windows:registry:key_value':
self.assertEqual(event_object.timestamp_desc,
eventdata.EventTimestamp.WRITTEN_TIME)
else:
self.assertEqual(event_object.timestamp_desc,
eventdata.EventTimestamp.WRITTEN_TIME)
for tag in read_store.GetTagging():
event_object = read_store.GetTaggedEvent(tag)
tags.append(event_object)
groups = list(read_store.GetGrouping())
self.assertEqual(len(groups), 1)
group_events = list(read_store.GetEventsFromGroup(groups[0]))
# Read the same events that were put in the group, just to compare
# against.
event_object = read_store.GetEventObject(1, 1)
serialized_event_object = serializer.WriteSerialized(event_object)
same_events.append(serialized_event_object)
event_object = read_store.GetEventObject(1, 2)
serialized_event_object = serializer.WriteSerialized(event_object)
same_events.append(serialized_event_object)
self.assertEqual(len(event_objects), 4)
self.assertEqual(len(tags), 4)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2009-04-05 12:27:39')
self.assertEqual(tags[0].timestamp, expected_timestamp)
self.assertEqual(tags[0].store_number, 1)
self.assertEqual(tags[0].store_index, 0)
self.assertEqual(tags[0].tag.comment, u'My comment')
self.assertEqual(tags[0].tag.color, u'blue')
msg, _ = formatters_manager.FormattersManager.GetMessageStrings(
self._formatter_mediator, tags[0])
self.assertEqual(msg[0:10], u'This is a ')
self.assertEqual(tags[1].tag.tags[0], 'Malware')
msg, _ = formatters_manager.FormattersManager.GetMessageStrings(
self._formatter_mediator, tags[1])
self.assertEqual(msg[0:15], u'[\\HKCU\\Windows\\')
self.assertEqual(tags[2].tag.comment, u'This is interesting')
self.assertEqual(tags[2].tag.tags[0], 'Malware')
self.assertEqual(tags[2].tag.tags[1], 'Benign')
self.assertEqual(tags[2].parser, 'UNKNOWN')
# Test the newly added fourth tag, which should include data from
# the first version as well.
self.assertEqual(tags[3].tag.tags[0], 'Interesting')
self.assertEqual(tags[3].tag.tags[1], 'Malware')
expected_timestamps = [
1238934459000000, 1334940286000000, 1334961526929596, 1335966206929596]
self.assertEqual(timestamps, expected_timestamps)
self.assertEqual(groups[0].name, u'Malicious')
self.assertEqual(groups[0].category, u'Malware')
self.assertEqual(groups[0].color, u'red')
self.assertEqual(groups[0].description, u'Events that are malicious')
self.assertEqual(groups[0].first_timestamp, 1334940286000000)
self.assertEqual(groups[0].last_timestamp, 1334961526929596)
self.assertEqual(len(group_events), 2)
self.assertEqual(group_events[0].timestamp, 1334940286000000)
self.assertEqual(group_events[1].timestamp, 1334961526929596)
proto_group_events = []
for group_event in group_events:
serialized_event_object = serializer.WriteSerialized(group_event)
proto_group_events.append(serialized_event_object)
self.assertEqual(same_events, proto_group_events)
class StoreStorageTest(unittest.TestCase):
"""Test sorting storage file,"""
def setUp(self):
"""Setup sets parameters that will be reused throughout this test."""
# TODO: have sample output generated from the test.
# TODO: Use input data with a defined year. syslog parser chooses a
# year based on system clock; forcing updates to test file if regenerated.
self.test_file = os.path.join(u'test_data', u'psort_test.proto.plaso')
self.first = timelib.Timestamp.CopyFromString(u'2012-07-20 15:44:14')
self.last = timelib.Timestamp.CopyFromString(u'2016-11-18 01:15:43')
def testStorageSort(self):
"""This test ensures that items read and output are in the expected order.
    This method by design outputs data as it runs. In order to test this, a
    modified output renderer is used for which the flush functionality has
been removed.
The test will be to read the TestEventBuffer storage and check to see
if it matches the known good sort order.
"""
pfilter.TimeRangeCache.ResetTimeConstraints()
pfilter.TimeRangeCache.SetUpperTimestamp(self.last)
pfilter.TimeRangeCache.SetLowerTimestamp(self.first)
store = storage.StorageFile(self.test_file, read_only=True)
store.store_range = [1, 5, 6]
read_list = []
event_object = store.GetSortedEntry()
while event_object:
read_list.append(event_object.timestamp)
event_object = store.GetSortedEntry()
expected_timestamps = [
1344270407000000, 1392438730000000, 1427151678000000, 1451584472000000]
self.assertEqual(read_list, expected_timestamps)
if __name__ == '__main__':
unittest.main()
| ostree/plaso | tests/lib/storage.py | Python | apache-2.0 | 11,812 |
# This file is part of the Edison Project.
# Please refer to the LICENSE document that was supplied with this software for information on how it can be used.
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib.auth.views import login, logout
# Project specific imports
from views import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^list/', listdata),
(r'^$', home),
(r'^add$',add),
(r'^edit/(?P<cfgid>\d+)/$',edit),
# REST based API URI's
(r'^api/', include('api.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/', include(admin.site.urls)),
)
| proffalken/edison | cmdb/urls.py | Python | bsd-3-clause | 732 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'ParameterValue.value'
db.alter_column(u'damis_parametervalue', 'value', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
def backwards(self, orm):
# Changing field 'ParameterValue.value'
db.alter_column(u'damis_parametervalue', 'value', self.gf('django.db.models.fields.CharField')(default='', max_length=255))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'damis.cluster': {
'Meta': {'object_name': 'Cluster'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'workload_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True'})
},
u'damis.component': {
'Meta': {'object_name': 'Component'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'cluster': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['damis.Cluster']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_lt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'function': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'label_lt': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'wsdl': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'damis.connection': {
'Meta': {'object_name': 'Connection'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source'", 'null': 'True', 'to': u"orm['damis.ParameterValue']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': u"orm['damis.ParameterValue']"})
},
u'damis.dataset': {
'Meta': {'object_name': 'Dataset'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'damis.experiment': {
'Meta': {'object_name': 'Experiment'},
'finish': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_calc_time': ('django.db.models.fields.TimeField', [], {'default': "'2:00'", 'null': 'True'}),
'p': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'SAVED'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiments'", 'null': 'True', 'to': u"orm['auth.User']"}),
'workflow_state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'damis.parameter': {
'Meta': {'object_name': 'Parameter'},
'algorithm': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parameters'", 'null': 'True', 'to': u"orm['damis.Component']"}),
'connection_type': ('django.db.models.fields.CharField', [], {'default': "'INPUT_VALUE'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_LT': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'label_LT': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'damis.parametervalue': {
'Meta': {'object_name': 'ParameterValue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['damis.Parameter']"}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['damis.ParameterValue']", 'null': 'True', 'through': u"orm['damis.Connection']", 'blank': 'True'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parameter_values'", 'to': u"orm['damis.WorkflowTask']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'damis.workflowtask': {
'Meta': {'object_name': 'WorkflowTask'},
'algorithm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['damis.Component']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tasks'", 'null': 'True', 'to': u"orm['damis.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['damis'] | InScience/DAMIS-old | src/damis/migrations/0039_auto__chg_field_parametervalue_value.py | Python | agpl-3.0 | 11,165 |
import unittest
from reoccur import reoccur, reoccur_no_mem
class TestReoccur(unittest.TestCase):
def test_reoccur_1(self):
self.assertEqual(reoccur("abcdefabcdef"), 'a')
def test_reoccur_2(self):
self.assertEqual(reoccur("cc"), 'c')
def test_reoccur_3(self):
self.assertEqual(reoccur("xylem"), False)
def test_reoccur_4(self):
self.assertEqual(reoccur("cabbage"), 'b')
def test_reoccur_5(self):
self.assertEqual(reoccur(""), False)
def test_reoccur_6(self):
self.assertEqual(reoccur("12345 098765 blah"), '5')
def test_reoccur_no_mem_1(self):
self.assertEqual(reoccur_no_mem("abcdefabcdef"), 'a')
def test_reoccur_no_mem_2(self):
self.assertEqual(reoccur_no_mem("cc"), 'c')
def test_reoccur_no_mem_3(self):
self.assertEqual(reoccur_no_mem("xylem"), False)
def test_reoccur_no_mem_4(self):
self.assertEqual(reoccur_no_mem("cabbage"), 'b')
def test_reoccur_no_mem_5(self):
self.assertEqual(reoccur_no_mem(""), False)
def test_reoccur_no_mem_6(self):
self.assertEqual(reoccur_no_mem("12345 098765 blah"), '5')
if __name__ == "__main__":
unittest.main()
| icemanblues/InterviewCodeTests | reoccurring-characters/test_reoccur.py | Python | gpl-2.0 | 1,256 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2015 EDF SA
# Contact:
# CCN - HPC <dsp-cspit-ccn-hpc@edf.fr>
# 1, Avenue du General de Gaulle
# 92140 Clamart
#
# Authors: CCN - HPC <dsp-cspit-ccn-hpc@edf.fr>
#
# This file is part of HPCStats.
#
# HPCStats is free software: you can redistribute in and/or
# modify it under the terms of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with HPCStats. If not, see
# <http://www.gnu.org/licenses/>.
#
# On Calibre systems, the complete text of the GNU General
# Public License can be found in `/usr/share/common-licenses/GPL'.
"""
Schema of the ``Cluster`` table in HPCStats database:
.. code-block:: sql
Cluster(
cluster_id SERIAL,
cluster_name character varying(30) NOT NULL,
CONSTRAINT Cluster_pkey PRIMARY KEY (cluster_id),
CONSTRAINT Cluster_unique UNIQUE (cluster_name)
)
"""
import logging
logger = logging.getLogger(__name__)
from HPCStats.Exceptions import HPCStatsDBIntegrityError, HPCStatsRuntimeError
class Cluster(object):
"""Model class for Cluster table"""
def __init__(self, name, cluster_id=None):
self.cluster_id = cluster_id
self.name = name
def __str__(self):
return self.name
def __eq__(self, other):
return self.name == other.name
def find(self, db):
"""Search the Cluster in the database based on its name. If exactly
one cluster matches in database, set cluster_id attribute properly
and returns its value. If more than one cluster matches, raises
HPCStatsDBIntegrityError. If no cluster is found, returns None.
"""
req = """
SELECT cluster_id
FROM Cluster
WHERE cluster_name = %s
"""
params = ( self.name, )
db.execute(req, params)
nb_rows = db.cur.rowcount
if nb_rows == 0:
logger.debug("cluster %s not found in DB", str(self))
return None
elif nb_rows > 1:
raise HPCStatsDBIntegrityError(
"several cluster_id found in DB for cluster %s" \
% (str(self)))
else:
self.cluster_id = db.cur.fetchone()[0]
logger.debug("cluster %s found in DB with id %d",
str(self),
self.cluster_id )
return self.cluster_id
def save(self, db):
"""Insert Cluster in database. You must make sure that the Cluster does
not already exist in database yet (typically using Cluster.find()
method else there is a risk of future integrity errors because of
duplicated clusters. If cluster_id attribute is set, it raises
HPCStatsRuntimeError.
"""
if self.cluster_id is not None:
raise HPCStatsRuntimeError(
"could not insert cluster %s since already existing in "\
"database" \
% (str(self)))
req = """
INSERT INTO Cluster ( cluster_name )
VALUES ( %s )
RETURNING cluster_id
"""
params = ( self.name, )
#print db.cur.mogrify(req, params)
db.execute(req, params)
self.cluster_id = db.cur.fetchone()[0]
def get_nb_cpus(self, db):
"""Returns the total number of CPUs available on the cluster"""
if self.cluster_id is None:
raise HPCStatsRuntimeError(
"could not search for data with cluster %s since not " \
"found in database" \
% (str(self)))
req = """
SELECT SUM(node_nbCpu)
FROM Node
WHERE cluster_id = %s
"""
params = ( self.cluster_id, )
#print db.cur.mogrify(req, params)
db.execute(req, params)
return db.cur.fetchone()[0]
def get_min_datetime(self, db):
"""Returns the start datetime of the oldest started and unfinished
job on the cluster.
"""
if self.cluster_id is None:
raise HPCStatsRuntimeError(
"could not search for data with cluster %s since not " \
"found in database" \
% (str(self)))
req = """
SELECT MIN(job_start)
FROM Job
WHERE cluster_id = %s
AND job_state NOT IN ('CANCELLED', 'NODE_FAIL', 'PENDING')
"""
params = ( self.cluster_id, )
#print db.cur.mogrify(req, params)
db.execute(req, params)
return db.cur.fetchone()[0]
def get_nb_accounts(self, db, creation_date):
"""Returns the total of users on the cluster whose account have been
created defore date given in parameter.
"""
if self.cluster_id is None:
raise HPCStatsRuntimeError(
"could not search for data with cluster %s since not " \
"found in database" \
% (str(self)))
req = """
SELECT COUNT (userhpc_id)
FROM Userhpc,
Account
WHERE Account.userhpc_id = Userhpc.userhpc_id
AND Account.account_creation < %s
AND Account.cluster_id = %s
"""
params = (creation_date, self.cluster_id )
#print db.cur.mogrify(req, params)
db.execute(req, params)
return db.cur.fetchone()[0]
def get_nb_active_users(self, db, start, end):
"""Returns the total number of users who have run job(s) on the cluster
between start and end datetimes in parameters.
"""
if self.cluster_id is None:
raise HPCStatsRuntimeError(
"could not search for data with cluster %s since not " \
"found in database" \
% (str(self)))
req = """
SELECT COUNT(DISTINCT userhpc_id)
FROM Job
                WHERE Job.cluster_id = %s
AND ((job_start BETWEEN %s AND %s)
OR (job_end BETWEEN %s AND %s)
OR (job_start <= %s AND job_end >= %s))
"""
params = (self.cluster_id, start, end, start, end, start, end)
#print db.cur.mogrify(req, params)
db.execute(req, params)
return db.cur.fetchone()[0]
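# Illustrative usage sketch (an editorial addition, not part of the original
# model): the find()/save() docstrings above prescribe looking a cluster up
# before inserting it. A loader holding the HPCStats database wrapper used by
# the methods above (exposing execute() and cur) could therefore do:
def _example_ensure_cluster(db, name):
    """Return the cluster_id for name, inserting the Cluster if missing."""
    cluster = Cluster(name)
    if cluster.find(db) is None:
        cluster.save(db)
    return cluster.cluster_id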
| edf-hpc/hpcstats | HPCStats/Model/Cluster.py | Python | gpl-2.0 | 6,981 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 22 15:52:24 2014
Script to plot the seismograms generated by SPECFEM2D.
The arguments must be correct paths to existing 2D seismogram files or
an existing option (--hold, --grid)
@author: Alexis Bottero (alexis.bottero@gmail.com)
"""
from __future__ import (absolute_import, division, print_function)
import argparse
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import matplotlib.pyplot as plt # Matplotlib's pyplot: MATLAB-like syntax
parser = argparse.ArgumentParser(
description='Plot seismograms generated by SPECFEM2D')
parser.add_argument('--hold', action='store_true',
help='Plot all seismograms on the same figure')
parser.add_argument('--grid', action='store_true',
help='Show a grid on the plot')
parser.add_argument('files', nargs='+', type=argparse.FileType('r'),
help='Files to be plotted')
args = parser.parse_args()
for seismo in args.files:
data = np.loadtxt(seismo)
if not args.hold:
plt.figure()
plt.plot(data[:, 0], data[:, 1])
plt.xlim(data[0, 0], data[-1, 0])
plt.grid(args.grid)
plt.hold(args.hold)
if not args.hold:
plt.title(seismo.name)
plt.show()
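# Example invocation (editorial note; the file names are placeholders for
# whatever seismogram files SPECFEM2D wrote, typically under OUTPUT_FILES/):
#   ./plot_script_using_python.py --hold --grid OUTPUT_FILES/AA.S0001.BXZ.semd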
| geodynamics/specfem1d | Fortran_version/plot_script_using_python.py | Python | gpl-2.0 | 1,288 |
"""
Copyright 2015 Paul T. Grogan, Massachusetts Institute of Technology
Copyright 2017 Paul T. Grogan, Stevens Institute of Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Test cases for L{ofspy.location} package.
"""
import unittest
from ...context.location import Location, Surface, Orbit
"""
Test cases for L{ofspy.location.Location} class.
"""
class LocationTestCase(unittest.TestCase):
def setUp(self):
self.default = Location(0)
def tearDown(self):
self.default = None
def test_isSurface(self):
self.assertFalse(self.default.isSurface())
def test_isOrbit(self):
self.assertFalse(self.default.isOrbit())
"""
Test cases for L{ofspy.location.Surface} class.
"""
class SurfaceTestCase(unittest.TestCase):
def setUp(self):
self.default = Surface(0)
def tearDown(self):
self.default = None
def test_isSurface(self):
self.assertTrue(self.default.isSurface())
def test_isOrbit(self):
self.assertFalse(self.default.isOrbit())
"""
Test cases for L{ofspy.location.Orbit} class.
"""
class OrbitTestCase(unittest.TestCase):
def setUp(self):
self.default = Orbit(0, "LEO")
def tearDown(self):
self.default = None
def test_isSurface(self):
self.assertFalse(self.default.isSurface())
def test_isOrbit(self):
self.assertTrue(self.default.isOrbit())
| ptgrogan/ofspy | ofspy/test/context/test_location.py | Python | apache-2.0 | 1,888 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import re
import signal
import subprocess
import sys
import tempfile
from telemetry.core.platform import profiler
# Parses one line of strace output, for example:
# 6052 1311456063.159722 read(8, "\1\0\0\0\0\0\0\0", 8) = 8 <0.000022>
_STRACE_LINE_RE = re.compile(
'^(?P<tid>\d+)\s+'
'(?P<ts>\d+)'
'(?P<micro>.\d+)\s+'
'(?P<func>.*?)'
'[(](?P<args>.*?)[)]\s+=\s+'
'(?P<ret>.*?)\s+'
'<(?P<dur>[\d.]+)>$')
_UNFINISHED_LINE_RE = re.compile(
'^(?P<tid>\d+)\s+'
'(?P<line>.*?)'
'<unfinished ...>$')
_RESUMED_LINE_RE = re.compile(
'^(?P<tid>\d+)\s+'
'(?P<ts>\d+)'
'(?P<micro>.\d+)\s+'
'<[.][.][.]\s(?P<func>.*?)\sresumed>'
'(?P<line>.*?)$')
_KILLED_LINE_RE = re.compile(
'^(?P<tid>\d+)\s+'
'(?P<ts>\d+)'
'(?P<micro>.\d+)\s+'
'[+][+][+] killed by SIGKILL [+][+][+]$')
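# Illustrative helper (an editorial addition; the profiler itself never calls
# it): shows how the sample line quoted above decomposes under _STRACE_LINE_RE.
def _example_parse_strace_line():
  """Return the named groups parsed from the documented sample strace line."""
  sample = ('6052 1311456063.159722 read(8, "\\1\\0\\0\\0\\0\\0\\0\\0", 8) '
            '= 8 <0.000022>')
  match = _STRACE_LINE_RE.match(sample)
  # Yields tid='6052', func='read', ret='8' and dur='0.000022', among others.
  return match.groupdict() if match else None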
def _StraceToChromeTrace(pid, infile):
"""Returns chrometrace json format for |infile| strace output."""
# Map of fd:file_name for open file descriptors. Useful for displaying
# file name instead of the descriptor number.
fd_map = {}
# Map of tid:interrupted_call for the interrupted call on each thread. It is
# possible to context switch during a system call. In this case we must
# match up the lines.
interrupted_call_map = {}
out = []
with open(infile, 'r') as f:
for line in f.readlines():
# Ignore kill lines for now.
m = _KILLED_LINE_RE.match(line)
if m:
continue
# If this line is interrupted, then remember it and continue.
m = _UNFINISHED_LINE_RE.match(line)
if m:
assert m.group('tid') not in interrupted_call_map
interrupted_call_map[m.group('tid')] = line
continue
# If this is a resume of a previous line, stitch it together.
interrupted = False
m = _RESUMED_LINE_RE.match(line)
if m:
interrupted = True
assert m.group('tid') in interrupted_call_map
line = interrupted_call_map[m.group('tid')].replace(
'<unfinished ...>', m.group('line'))
del interrupted_call_map[m.group('tid')]
# At this point we can do a normal match.
m = _STRACE_LINE_RE.match(line)
if not m:
if ('exit' not in line and
'Profiling timer expired' not in line and
'<unavailable>' not in line):
logging.warn('Failed to parse line: %s' % line)
continue
ts_begin = int(1000000 * (int(m.group('ts')) + float(m.group('micro'))))
ts_end = ts_begin + int(1000000 * float(m.group('dur')))
tid = int(m.group('tid'))
function_name = unicode(m.group('func'), errors='ignore')
function_args = unicode(m.group('args'), errors='ignore')
ret = unicode(m.group('ret'), errors='ignore')
cat = 'strace'
possible_fd_arg = None
first_arg = function_args.split(',')[0]
if first_arg and first_arg.strip().isdigit():
possible_fd_arg = first_arg.strip()
if function_name == 'open' and ret.isdigit():
# 1918 1311606151.649379 open("/foo/bar.so", O_RDONLY) = 7 <0.000088>
fd_map[ret] = first_arg
args = {
'args': function_args,
'ret': ret,
}
if interrupted:
args['interrupted'] = True
if possible_fd_arg and possible_fd_arg in fd_map:
args['fd%s' % first_arg] = fd_map[possible_fd_arg]
out.append({
'cat': cat,
'pid': pid,
'tid': tid,
'ts': ts_begin,
'ph': 'B', # Begin
'name': function_name,
})
out.append({
'cat': cat,
'pid': pid,
'tid': tid,
'ts': ts_end,
'ph': 'E', # End
'name': function_name,
'args': args,
})
return out
def _GenerateTraceMetadata(model):
out = []
for process in model.processes:
out.append({
'name': 'process_name',
'ph': 'M', # Metadata
'pid': process,
'args': {
'name': model.processes[process].name
}
})
for thread in model.processes[process].threads:
out.append({
'name': 'thread_name',
'ph': 'M', # Metadata
'pid': process,
'tid': thread,
'args': {
'name': model.processes[process].threads[thread].name
}
})
return out
class _SingleProcessStraceProfiler(object):
"""An internal class for using perf for a given process."""
def __init__(self, pid, output_file, platform_backend):
self._pid = pid
self._platform_backend = platform_backend
self._output_file = output_file
self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
self._proc = subprocess.Popen(
['strace', '-ttt', '-f', '-T', '-p', str(pid), '-o', output_file],
stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
def CollectProfile(self):
if ('renderer' in self._output_file and
not self._platform_backend.GetCommandLine(self._pid)):
logging.warning('Renderer was swapped out during profiling. '
'To collect a full profile rerun with '
'"--extra-browser-args=--single-process"')
self._proc.send_signal(signal.SIGINT)
exit_code = self._proc.wait()
try:
if exit_code:
raise Exception('strace failed with exit code %d. Output:\n%s' % (
exit_code, self._GetStdOut()))
finally:
self._tmp_output_file.close()
return _StraceToChromeTrace(self._pid, self._output_file)
def _GetStdOut(self):
self._tmp_output_file.flush()
try:
with open(self._tmp_output_file.name) as f:
return f.read()
except IOError:
return ''
class StraceProfiler(profiler.Profiler):
def __init__(self, browser_backend, platform_backend, output_path, state):
super(StraceProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
assert self._browser_backend.supports_tracing
self._browser_backend.StartTracing(None, 10)
process_output_file_map = self._GetProcessOutputFileMap()
self._process_profilers = []
self._output_file = output_path + '.json'
for pid, output_file in process_output_file_map.iteritems():
if 'zygote' in output_file:
continue
self._process_profilers.append(
_SingleProcessStraceProfiler(pid, output_file, platform_backend))
@classmethod
def name(cls):
return 'strace'
@classmethod
def is_supported(cls, browser_type):
if sys.platform != 'linux2':
return False
# TODO(tonyg): This should be supported on android and cros.
if (browser_type.startswith('android') or
browser_type.startswith('cros')):
return False
return True
@classmethod
def CustomizeBrowserOptions(cls, browser_type, options):
options.AppendExtraBrowserArgs([
'--no-sandbox',
'--allow-sandbox-debugging'
])
def CollectProfile(self):
print 'Processing trace...'
out_json = []
for single_process in self._process_profilers:
out_json.extend(single_process.CollectProfile())
model = self._browser_backend.StopTracing().AsTimelineModel()
out_json.extend(_GenerateTraceMetadata(model))
with open(self._output_file, 'w') as f:
f.write(json.dumps(out_json, separators=(',', ':')))
print 'Trace saved as %s' % self._output_file
print 'To view, open in chrome://tracing'
return [self._output_file]
| androidarmv6/android_external_chromium_org | tools/telemetry/telemetry/core/platform/profiler/strace_profiler.py | Python | bsd-3-clause | 7,691 |
from django import template
register = template.Library()
class SetVarNode(template.Node):
def __init__(self, var_name, var_value):
        self.var_name = var_name
self.var_value = var_value
def render(self, context):
try:
value = template.Variable(self.var_value).resolve(context)
except template.VariableDoesNotExist:
value = ""
context[self.var_name] = value
return u""
def set_var(parser, token):
"""
{% set <var_name> = <var_value> %}
"""
parts = token.split_contents()
if len(parts) < 4:
raise template.TemplateSyntaxError("'set' tag must be of the form: {% set <var_name> = <var_value> %}")
return SetVarNode(parts[1], parts[3])
register.tag('set', set_var)
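# Illustrative template usage (editorial note; assumes this templatetags
# module is loaded under its file name):
#   {% load setvariabletag %}
#   {% set greeting = user.first_name %}
#   Hello {{ greeting }}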
| raychorn/knowu | django/djangononrelsample2/views/templatetags/setvariabletag.py | Python | lgpl-3.0 | 841 |
#!/usr/bin/python -tt
__author__ = 'mricon'
import logging
import os
import sys
import anyjson
import totpcgi
import totpcgi.backends
import totpcgi.backends.file
import totpcgi.utils
import datetime
import dateutil
import dateutil.parser
import dateutil.tz
from string import Template
import syslog
#--------------- CHANGE ME TO REFLECT YOUR ENVIRONMENT -------------------
# You need to change this to reflect your environment
GL_2FA_COMMAND = 'ssh git@example.com 2fa'
HELP_DOC_LINK = 'https://example.com'
# Set to False to disallow yubikey (HOTP) enrolment
ALLOW_YUBIKEY = True
# This will allow anyone to use "override" as the 2-factor token
# Obviously, this should only be used during initial debugging
# and testing and then set to false.
ALLOW_BYPASS_OVERRIDE = False
# In the TOTP case, the window size is the time drift between the user's device
# and the server. A window size of 17 means 17*10 seconds, or in other words,
# we'll accept any tokencodes that were valid within 170 seconds before now, and
# 170 seconds after now.
# In the HOTP case, discrepancy between the counter on the device and the counter
# on the server is virtually guaranteed (accidental button presses on the yubikey,
# authentication failures, etc), so the window size indicates how many tokens we will
# try in addition to the current one. The setting of 30 is sane and is not likely to
# lock someone out.
TOTP_WINDOW_SIZE = 17
HOTP_WINDOW_SIZE = 30
# First value is the number of times. Second value is the number of seconds.
# So, "3, 30" means "3 falures within 30 seconds"
RATE_LIMIT = (3, 30)
# Google Authenticator and other devices default to key length of 80 bits, while
# for yubikeys the length must be 160 bits. I suggest you leave these as-is.
TOTP_KEY_LENGTH = 80
HOTP_KEY_LENGTH = 160
# This identifies the token in the user's TOTP app
TOTP_USER_MASK = '$username@example.com'
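# For example, with GL_USER set to "jdoe" (a placeholder name), the mask above
# is filled in by generate_user_token() below, so the TOTP app will label the
# token "jdoe@example.com".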
# GeoIP-city database location.
# This is only currently used as a sort of a reminder to the users, so when they list
# their current validations using list-val, it can help them figure out where they
# previously authorized from.
# You can download the City database from
# http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.xz and put
# into GL_ADMIN_BASE/2fa/ (uncompress first). If the code doesn't find it, it'll
# try to use the basic GeoIP country information. If that fails, it'll just
# quietly omit GeoIP data.
GEOIP_CITY_DB = os.path.join(os.environ['GL_ADMIN_BASE'], '2fa/GeoLiteCity.dat')
# Identify ourselves in syslog as "gl-2fa"
syslog.openlog('gl-2fa', syslog.LOG_PID, syslog.LOG_AUTH)
#-------------------------------------------------------------------------
# default basic logger. We override it later.
logger = logging.getLogger(__name__)
def print_help_link():
print('')
print('If you need more help, please see the following link:')
print(' %s' % HELP_DOC_LINK)
print('')
def get_geoip_crc(ipaddr):
import GeoIP
if os.path.exists(GEOIP_CITY_DB):
logger.debug('Opening geoip db in %s' % GEOIP_CITY_DB)
gi = GeoIP.open(GEOIP_CITY_DB, GeoIP.GEOIP_STANDARD)
else:
logger.debug('%s does not exist, using basic geoip db' % GEOIP_CITY_DB)
gi = GeoIP.new(GeoIP.GEOIP_MEMORY_CACHE)
ginfo = gi.record_by_addr(ipaddr)
if ginfo is not None:
city = region_name = country_code = 'Unknown'
if ginfo['city'] is not None:
city = unicode(ginfo['city'], 'iso-8859-1')
if ginfo['region_name'] is not None:
region_name = unicode(ginfo['region_name'], 'iso-8859-1')
if ginfo['country_code'] is not None:
country_code = unicode(ginfo['country_code'], 'iso-8859-1')
crc = u'%s, %s, %s' % (city, region_name, country_code)
else:
# try just the country code, then
crc = gi.country_code_by_addr(ipaddr)
if not crc:
return None
crc = unicode(crc, 'iso-8859-1')
return crc
def load_authorized_ips():
# The authorized ips file has the following structure:
# {
# 'IP_ADDR': {
# 'added': RFC_8601_DATETIME,
# 'expires': RFC_8601_DATETIME,
# 'whois': whois information about the IP at the time of recording,
# 'geoip': geoip information about the IP at the time of recording,
# }
#
# It is stored in GL_ADMIN_BASE/2fa/validations/GL_USER.js
user = os.environ['GL_USER']
val_dir = os.path.join(os.environ['GL_ADMIN_BASE'], '2fa/validations')
if not os.path.exists(val_dir):
os.makedirs(val_dir, 0700)
logger.debug('Created val_dir in %s' % val_dir)
valfile = os.path.join(val_dir, '%s.js' % user)
logger.debug('Loading authorized ips from %s' % valfile)
valdata = {}
if os.access(valfile, os.R_OK):
try:
fh = open(valfile, 'r')
jdata = fh.read()
fh.close()
valdata = anyjson.deserialize(jdata)
except:
logger.critical('Validations file exists, but could not be parsed!')
logger.critical('All previous validations have been lost, starting fresh.')
return valdata
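# For illustration only, a single entry in this file (as written by
# store_validation() further below) looks roughly like:
#   {"192.0.2.10": {"added": "2015-01-01 12:00:00+00:00",
#                   "expires": "2015-01-02 12:00:00+00:00",
#                   "whois": "EXAMPLE-ORG/US\n",
#                   "geoip": "City, Region, CC"}}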
def store_authorized_ips(valdata):
user = os.environ['GL_USER']
val_dir = os.path.join(os.environ['GL_ADMIN_BASE'], '2fa/validations')
valfile = os.path.join(val_dir, '%s.js' % user)
jdata = anyjson.serialize(valdata)
fh = open(valfile, 'w')
fh.write(jdata)
fh.close()
logger.debug('Wrote new validations file in %s' % valfile)
def store_validation(remote_ip, hours):
valdata = load_authorized_ips()
utc = dateutil.tz.tzutc()
now_time = datetime.datetime.now(utc).replace(microsecond=0)
expires = now_time + datetime.timedelta(hours=hours)
logger.info('Adding IP address %s until %s' % (remote_ip, expires.strftime('%c %Z')))
valdata[remote_ip] = {
'added': now_time.isoformat(sep=' '),
'expires': expires.isoformat(sep=' '),
}
# Try to lookup whois info if cymruwhois is available
try:
import cymruwhois
cym = cymruwhois.Client()
res = cym.lookup(remote_ip)
if res.owner and res.cc:
whois = "%s/%s\n" % (res.owner, res.cc)
valdata[remote_ip]['whois'] = whois
logger.info('Whois information for %s: %s' % (remote_ip, whois))
except:
pass
try:
geoip = get_geoip_crc(remote_ip)
if geoip is not None:
valdata[remote_ip]['geoip'] = geoip
logger.info('GeoIP information for %s: %s' % (remote_ip, geoip))
except:
pass
store_authorized_ips(valdata)
def generate_user_token(backends, mode):
if mode == 'totp':
gaus = totpcgi.utils.generate_secret(
RATE_LIMIT, TOTP_WINDOW_SIZE, 5, bs=TOTP_KEY_LENGTH)
else:
gaus = totpcgi.utils.generate_secret(
RATE_LIMIT, HOTP_WINDOW_SIZE, 5, bs=HOTP_KEY_LENGTH)
gaus.set_hotp(0)
user = os.environ['GL_USER']
backends.secret_backend.save_user_secret(user, gaus, None)
# purge all old state, as it's now obsolete
backends.state_backend.delete_user_state(user)
logger.info('New token generated for user %s' % user)
remote_ip = os.environ['SSH_CONNECTION'].split()[0]
syslog.syslog(
syslog.LOG_NOTICE,
'Enrolled: user=%s, host=%s, mode=%s' % (user, remote_ip, mode)
)
if mode == 'totp':
# generate provisioning URI
tpt = Template(TOTP_USER_MASK)
totp_user = tpt.safe_substitute(username=user)
qr_uri = gaus.otp.provisioning_uri(totp_user)
import urllib
print('')
print('Please make sure "qrencode" is installed.')
print('Run the following commands to display your QR code:')
print(' unset HISTFILE')
print(' qrencode -tANSI -m1 -o- "%s"' % qr_uri)
print('')
print('If that does not work or if you do not have access to')
print('qrencode or a similar QR encoding tool, then you may')
print('open an INCOGNITO/PRIVATE MODE window in your browser')
print('and paste the following URL:')
print(
'https://www.google.com/chart?chs=200x200&chld=M|0&cht=qr&chl=%s' %
urllib.quote_plus(qr_uri))
print('')
print('Scan the resulting QR code with your TOTP app, such as')
print('FreeOTP (recommended), Google Authenticator, Authy, or others.')
else:
import binascii
import base64
keyhex = binascii.hexlify(base64.b32decode(gaus.otp.secret))
print('')
print('Please make sure "ykpersonalize" has been installed.')
print('Insert your yubikey and, as root, run the following command')
print('to provision the secret into slot 1 (use -2 for slot 2):')
print(' unset HISTFILE')
print(' ykpersonalize -1 -ooath-hotp -oappend-cr -a%s' % keyhex)
print('')
if gaus.scratch_tokens:
print('Please write down/print the following 8-digit scratch tokens.')
print('If you lose your device or temporarily have no access to it, you')
print('will be able to use these tokens for one-time bypass.')
print('')
print('Scratch tokens:')
print('\n'.join(gaus.scratch_tokens))
    print('')
print('Now run the following command to verify that all went well')
if mode == 'totp':
print(' %s val [token]' % GL_2FA_COMMAND)
else:
print(' %s val [yubkey button press]' % GL_2FA_COMMAND)
print_help_link()
def enroll(backends):
proceed = False
mode = 'totp'
if ALLOW_YUBIKEY and len(sys.argv) <= 2:
        logger.critical('Enrollment mode not specified.')
elif ALLOW_YUBIKEY:
if sys.argv[2] not in ('totp', 'yubikey'):
logger.critical('%s is not a valid enrollment mode' % sys.argv[2])
else:
mode = sys.argv[2]
proceed = True
else:
proceed = True
if not proceed:
print('Please specify whether you are enrolling a yubikey or a TOTP phone app')
print('Examples:')
print(' %s enroll yubikey' % GL_2FA_COMMAND)
print(' %s enroll totp' % GL_2FA_COMMAND)
print_help_link()
sys.exit(1)
logger.info('%s enrollment mode selected' % mode)
user = os.environ['GL_USER']
try:
try:
backends.secret_backend.get_user_secret(user)
except totpcgi.UserSecretError:
pass
logger.critical('User %s already enrolled' % user)
print('Looks like you are already enrolled. If you want to re-issue your token,')
print('you will first need to remove your currently active one.')
print('')
print('If you have access to your current device or 8-digit scratch codes, run:')
        print('  %s unenroll [token]' % GL_2FA_COMMAND)
print_help_link()
sys.exit(1)
except totpcgi.UserNotFound:
pass
generate_user_token(backends, mode)
def unenroll(backends):
token = sys.argv[2]
user = os.environ['GL_USER']
remote_ip = os.environ['SSH_CONNECTION'].split()[0]
ga = totpcgi.GoogleAuthenticator(backends)
try:
status = ga.verify_user_token(user, token)
except Exception, ex:
if ALLOW_BYPASS_OVERRIDE and token == 'override':
status = "%s uses 'override'. It's super effective!" % user
syslog.syslog(
                syslog.LOG_NOTICE, 'OVERRIDE USED: user=%s, host=%s' % (user, remote_ip)
)
else:
logger.critical('Failed to validate token.')
print('If using a phone app, please wait for token to change before trying again.')
syslog.syslog(
syslog.LOG_NOTICE,
'Failure: user=%s, host=%s, message=%s' % (user, remote_ip, str(ex))
)
print_help_link()
sys.exit(1)
syslog.syslog(
syslog.LOG_NOTICE,
'Success: user=%s, host=%s, message=%s' % (user, remote_ip, status)
)
logger.info(status)
# Okay, deleting
logger.info('Removing the secrets file.')
backends.secret_backend.delete_user_secret(user)
# purge all old state, as it's now obsolete
logger.info('Cleaning up state files.')
backends.state_backend.delete_user_state(user)
logger.info('Expiring all validations.')
inval(expire_all=True)
logger.info('You have been successfully unenrolled.')
def val(backends, hours=24):
if len(sys.argv) <= 2:
logger.critical('Missing tokencode.')
print('You need to pass the token code as the last argument. E.g.:')
print(' %s val [token]' % GL_2FA_COMMAND)
print_help_link()
sys.exit(1)
token = sys.argv[2]
user = os.environ['GL_USER']
remote_ip = os.environ['SSH_CONNECTION'].split()[0]
ga = totpcgi.GoogleAuthenticator(backends)
try:
status = ga.verify_user_token(user, token)
except Exception, ex:
if ALLOW_BYPASS_OVERRIDE and token == 'override':
status = "%s uses 'override'. It's super effective!" % user
            syslog.syslog(
                syslog.LOG_NOTICE,
                'OVERRIDE USED: user=%s, host=%s' % (user, remote_ip)
            )
else:
logger.critical('Failed to validate token.')
print('If using a phone app, please wait for token to change before trying again.')
syslog.syslog(
syslog.LOG_NOTICE,
'Failure: user=%s, host=%s, message=%s' % (user, remote_ip, str(ex))
)
print_help_link()
sys.exit(1)
syslog.syslog(
syslog.LOG_NOTICE,
'Success: user=%s, host=%s, message=%s' % (user, remote_ip, status)
)
logger.info(status)
store_validation(remote_ip, hours)
def isval():
authorized_ips = load_authorized_ips()
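    # load_authorized_ips() maps each validated IP to its metadata; an
    # illustrative (hypothetical) entry looks like:
    #   {'192.0.2.1': {'expires': '2015-04-01 12:00:00+00:00'}}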
remote_ip = os.environ['SSH_CONNECTION'].split()[0]
matching = None
if remote_ip not in authorized_ips.keys():
import netaddr
# We can't rely on strings, as ipv6 has more than one way to represent the same IP address, e.g.:
# 2001:4f8:1:10:0:1991:8:25 and 2001:4f8:1:10::1991:8:25
for authorized_ip in authorized_ips.keys():
if netaddr.IPAddress(remote_ip) == netaddr.IPAddress(authorized_ip):
# Found it
matching = authorized_ip
break
else:
matching = remote_ip
if matching is None:
print("False")
sys.exit(1)
# Okay, but is it still valid?
expires = authorized_ips[matching]['expires']
logger.debug('Validation for %s expires on %s' % (matching, expires))
import datetime
import dateutil
import dateutil.parser
import dateutil.tz
exp_time = dateutil.parser.parse(expires)
utc = dateutil.tz.tzutc()
now_time = datetime.datetime.now(utc)
logger.debug('exp_time: %s' % exp_time)
logger.debug('now_time: %s' % now_time)
if now_time > exp_time:
print("False")
sys.exit(1)
print("True")
sys.exit(0)
def list_val(active_only=True):
valdata = load_authorized_ips()
if active_only:
utc = dateutil.tz.tzutc()
now_time = datetime.datetime.now(utc)
for authorized_ip in valdata.keys():
exp_time = dateutil.parser.parse(valdata[authorized_ip]['expires'])
if now_time > exp_time:
del valdata[authorized_ip]
if valdata:
# anyjson doesn't let us indent
import json
print(json.dumps(valdata, indent=4))
if active_only:
print('Listed non-expired entries only. Run "list-val all" to list all.')
def inval(expire_all=False):
valdata = load_authorized_ips()
utc = dateutil.tz.tzutc()
now_time = datetime.datetime.now(utc).replace(microsecond=0)
new_exp_time = now_time - datetime.timedelta(seconds=1)
to_expire = []
if sys.argv[2] == 'myip':
inval_ip = os.environ['SSH_CONNECTION'].split()[0]
elif sys.argv[2] == 'all':
expire_all = True
else:
inval_ip = sys.argv[2]
if expire_all:
for authorized_ip in valdata:
exp_time = dateutil.parser.parse(valdata[authorized_ip]['expires'])
if exp_time > now_time:
to_expire.append(authorized_ip)
else:
if inval_ip not in valdata.keys():
logger.info('Did not find %s in the list of authorized IPs.' % inval_ip)
else:
to_expire.append(inval_ip)
if to_expire:
for inval_ip in to_expire:
exp_time = dateutil.parser.parse(valdata[inval_ip]['expires'])
if exp_time > now_time:
logger.info('Force-expired %s.' % inval_ip)
valdata[inval_ip]['expires'] = new_exp_time.isoformat(sep=' ')
else:
logger.info('%s was already expired.' % inval_ip)
store_authorized_ips(valdata)
list_val(active_only=True)
def main():
global logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"[%s] " % os.environ['GL_USER'] + "%(asctime)s - %(levelname)s - %(message)s")
# We log alongside GL_LOGFILE and follow Gitolite's log structure
(logdir, logname) = os.path.split(os.environ['GL_LOGFILE'])
logfile = os.path.join(logdir, '2fa-command-%s' % logname)
ch = logging.FileHandler(logfile)
ch.setFormatter(formatter)
if '2FA_LOG_DEBUG' in os.environ.keys():
loglevel = logging.DEBUG
else:
loglevel = logging.INFO
ch.setLevel(loglevel)
logger.addHandler(ch)
    # INFO and above also go to the console
ch = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
ch.setLevel(logging.INFO)
logger.addHandler(ch)
backends = totpcgi.backends.Backends()
# We only use file backends
secrets_dir = os.path.join(os.environ['GL_ADMIN_BASE'], '2fa/secrets')
state_dir = os.path.join(os.environ['GL_ADMIN_BASE'], '2fa/state')
logger.debug('secrets_dir=%s' % secrets_dir)
logger.debug('state_dir=%s' % state_dir)
# Create those two dirs if they don't exist
if not os.path.exists(secrets_dir):
os.makedirs(secrets_dir, 0700)
logger.info('Created %s' % secrets_dir)
if not os.path.exists(state_dir):
os.makedirs(state_dir, 0700)
logger.info('Created %s' % state_dir)
backends.secret_backend = totpcgi.backends.file.GASecretBackend(secrets_dir)
backends.state_backend = totpcgi.backends.file.GAStateBackend(state_dir)
if len(sys.argv) < 2:
command = 'help'
else:
command = sys.argv[1]
if command == 'enroll':
enroll(backends)
elif command == 'unenroll':
if len(sys.argv) <= 2:
logger.critical('Missing authorization token.')
print('Please use your current token code to unenroll.')
print('You may also use a one-time 8-digit code for the same purpose.')
print('E.g.: %s unenroll [token]' % GL_2FA_COMMAND)
sys.exit(1)
unenroll(backends)
elif command == 'val':
val(backends)
elif command == 'val-for-days':
if len(sys.argv) <= 2:
logger.critical('Missing number of days to keep the validation.')
sys.exit(1)
try:
days = int(sys.argv[2])
except ValueError:
logger.critical('The number of days should be an integer.')
sys.exit(1)
if days > 30 or days < 1:
logger.critical('The number of days must be a number between 1 and 30.')
sys.exit(1)
hours = days * 24
# shift token into 2nd position
del sys.argv[2]
val(backends, hours=hours)
elif command == 'list-val':
if len(sys.argv) > 2 and sys.argv[2] == 'all':
list_val(active_only=False)
else:
list_val(active_only=True)
elif command == 'inval':
if len(sys.argv) <= 2:
logger.critical('You need to provide an IP address to invalidate.')
logger.critical('You may use "myip" to invalidate your current IP address.')
logger.critical('You may also use "all" to invalidate ALL currently active IP addresses.')
sys.exit(1)
inval()
elif command == 'isval':
isval()
elif command == 'help':
# Print out a summary of commands
print('Command summary:')
print('---------------|-----------------------------------------------')
print('enroll [mode] | Enroll with 2-factor authentication')
print(' | (mode=totp or yubikey)')
print('---------------|-----------------------------------------------')
print('val [tkn] | Validate your current IP address for 24 hours')
print(' | (tkn means your currently displayed 2fa code')
print('---------------|-----------------------------------------------')
print('val-for-days | Validate your current IP address for NN days')
print(' [NN] [tkn] | (max=30)')
print('---------------|-----------------------------------------------')
print('list-val [all] | List currently validated IP addresses')
print(' | ("all" lists expired addresses as well)')
print('---------------|-----------------------------------------------')
print('inval [ip] | Force-invalidate a specific IP address')
print(' | (can be "myip" or "all")')
print('---------------|-----------------------------------------------')
print('isval | Checks if your current IP is valid and returns')
print(' | "True" or "False" (also sets error code)')
print('---------------|-----------------------------------------------')
print('unenroll [tkn] | Unenroll from 2-factor authentication')
print_help_link()
if __name__ == '__main__':
if 'GL_USER' not in os.environ:
sys.stderr.write('Please run me from gitolite hooks')
sys.exit(1)
if 'SSH_CONNECTION' not in os.environ:
sys.stderr.write('This only works when accessed over SSH')
sys.exit(1)
main()
| n054/totp-cgi | contrib/gitolite/command.py | Python | gpl-2.0 | 22,349 |
from __future__ import unicode_literals, print_function
from builtins import bytes, dict, list, int, float, str
import json
import sys
from cmd import Cmd
from reflectrpc.client import RpcClient
from reflectrpc.client import RpcError
import reflectrpc
import reflectrpc.cmdline
def print_types(types):
for t in types:
if t['type'] == 'enum':
print('enum: %s' % (t['name']))
print('Description: %s' % (t['description']))
for value in t['values']:
print(' [%d] %s - %s' % (value['intvalue'], value['name'], value['description']))
elif t['type'] == 'hash':
print('hash: %s' % (t['name']))
print('Description: %s' % (t['description']))
for field in t['fields']:
print(' [%s] %s - %s' % (field['type'], field['name'], field['description']))
else:
print('Unknown class of custom type: %s' % (t['type']))
def print_functions(functions):
for func_desc in functions:
paramlist = [param['name'] for param in func_desc['params']]
paramlist = ', '.join(paramlist)
print("%s(%s) - %s" % (func_desc['name'], paramlist, func_desc['description']))
for param in func_desc['params']:
print(" [%s] %s - %s" % (param['type'], param['name'], param['description']))
print(" Result: %s - %s" % (func_desc['result_type'], func_desc['result_desc']))
def split_exec_line(line):
tokens = []
curtoken = ''
intoken = False
instring = False
lastc = ''
arraylevel = 0
hashlevel = 0
for c in line:
if c.isspace():
if not intoken:
lastc = c
continue
# end of token?
if not arraylevel and not hashlevel and not instring:
tokens.append(curtoken.strip())
curtoken = ''
intoken = False
else:
intoken = True
if intoken:
curtoken += c
if c == '"':
if lastc != '\\':
instring = not instring
elif c == '[':
if not instring:
arraylevel += 1
elif c == ']':
if not instring:
arraylevel -= 1
elif c == '{':
if not instring:
hashlevel += 1
elif c == '}':
if not instring:
hashlevel -= 1
lastc = c
if len(curtoken.strip()):
tokens.append(curtoken.strip())
# type casting
itertokens = iter(tokens)
next(itertokens) # skip first token which is the method name
for i, t in enumerate(itertokens):
i += 1
try:
tokens[i] = json.loads(t)
except ValueError as e:
print("Invalid JSON in parameter %i:" % (i))
print("'%s'" % (t))
return None
return tokens
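# Illustrative example: split_exec_line('echo "Hello" [1, 2] {"a": 1}') returns
# ['echo', 'Hello', [1, 2], {'a': 1}] -- the method name stays a string while
# every parameter is parsed as JSON.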
class ReflectRpcShell(Cmd):
def __init__(self, client):
if issubclass(Cmd, object):
super().__init__()
else:
Cmd.__init__(self)
self.client = client
def connect(self):
self.client.enable_auto_reconnect()
try:
self.retrieve_service_description()
self.retrieve_functions()
self.retrieve_custom_types()
except reflectrpc.client.NetworkError as e:
print(e, file=sys.stderr)
print('', file=sys.stderr)
reflectrpc.cmdline.connection_failed_error(self.client.host,
self.client.port, True)
except reflectrpc.client.HttpException as e:
if e.status == '401':
print('Authentication failed\n', file=sys.stderr)
reflectrpc.cmdline.connection_failed_error(self.client.host,
self.client.port, True)
raise e
self.prompt = '(rpc) '
if self.client.host.startswith('unix://'):
self.intro = "ReflectRPC Shell\n================\n\nType 'help' for available commands\n\nRPC server: %s" % (self.client.host)
else:
self.intro = "ReflectRPC Shell\n================\n\nType 'help' for available commands\n\nRPC server: %s:%i" % (self.client.host, self.client.port)
if self.service_description:
self.intro += "\n\nSelf-description of the Service:\n================================\n"
if self.service_description['name']:
self.intro += self.service_description['name']
if self.service_description['version']:
self.intro += " (%s)\n" % (self.service_description['version'])
if self.service_description['description']:
self.intro += self.service_description['description']
def retrieve_service_description(self):
self.service_description = ''
try:
self.service_description = self.client.rpc_call('__describe_service')
except RpcError:
pass
def retrieve_functions(self):
self.functions = []
try:
self.functions = self.client.rpc_call('__describe_functions')
except RpcError:
pass
def retrieve_custom_types(self):
self.custom_types = []
try:
self.custom_types = self.client.rpc_call('__describe_custom_types')
except RpcError:
pass
def complete_doc(self, text, line, start_index, end_index):
return self.function_completion(text, line)
def complete_exec(self, text, line, start_index, end_index):
return self.function_completion(text, line)
def complete_notify(self, text, line, start_index, end_index):
return self.function_completion(text, line)
def function_completion(self, text, line):
if len(line.split()) > 2:
return []
if len(line.split()) == 2 and text == '':
return []
result = [f['name'] for f in self.functions if f['name'].startswith(text)]
if len(result) == 1 and result[0] == text:
return []
return result
def complete_type(self, text, line, start_index, end_index):
if len(line.split()) > 2:
return []
if len(line.split()) == 2 and text == '':
return []
result = [t['name'] for t in self.custom_types if t['name'].startswith(text)]
if len(result) == 1 and result[0] == text:
return []
return result
def do_help(self, line):
if not line:
print("list - List all RPC functions advertised by the server")
print("doc - Show the documentation of a RPC function")
print("type - Show the documentation of a custom RPC type")
print("types - List all custom RPC types advertised by the server")
print("exec - Execute an RPC call")
print("notify - Execute an RPC call but tell the server to send no response")
print("raw - Directly send a raw JSON-RPC message to the server")
print("quit - Quit this program")
print("help - Print this message. 'help [command]' prints a")
print(" detailed help message for a command")
return
if line == 'list':
print("List all RPC functions advertised by the server")
elif line == 'doc':
print("Show the documentation of an RPC function")
print("Example:")
print(" doc echo")
elif line == 'type':
print("Shos the documentation of a custom RPC type")
print("Example:")
print(" type PhoneType")
elif line == 'types':
print("List all custom RPC types advertised by the server")
elif line == 'exec':
print("Execute an RPC call")
print("Examples:")
print(" exec echo \"Hello RPC server\"")
print(" exec add 4 8")
elif line == 'notify':
print("Execute an RPC call but tell the server to send no response")
print("Example:")
print(" notify rpc_function")
elif line == 'raw':
print("Directly send a raw JSON-RPC message to the server")
print("Example:")
print(' raw {"method": "echo", "params": ["Hello Server"], "id": 1}')
elif line == 'quit':
print("Quit this program")
elif line == 'help':
pass
else:
print("No help available for unknown command:", line)
def do_type(self, line):
if not line:
print("You have to pass the name of a custom RPC type: 'type [typename]'")
return
t = [t for t in self.custom_types if t['name'] == line]
if not t:
print("Unknown custom RPC type:", line)
print_types(t)
def do_types(self, line):
for t in self.custom_types:
print(t['name'])
def do_exec(self, line):
tokens = split_exec_line(line)
if not tokens:
return
method = tokens.pop(0)
try:
result = self.client.rpc_call(method, *tokens)
print("Server replied:", json.dumps(result, indent=4, sort_keys=True))
except RpcError as e:
print(e)
def do_notify(self, line):
tokens = split_exec_line(line)
if not tokens:
return
method = tokens.pop(0)
self.client.rpc_notify(method, *tokens)
def do_raw(self, line):
print(self.client.rpc_call_raw(line))
def do_doc(self, line):
if not line:
print("You have to pass the name of an RPC function: 'doc [function]'")
return
function = [func for func in self.functions if func['name'] == line]
if not function:
print("Unknown RPC function:", line)
print_functions(function)
def do_list(self, line):
for func in self.functions:
paramlist = [param['name'] for param in func['params']]
print("%s(%s)" % (func['name'], ', '.join(paramlist)))
def do_quit(self, line):
sys.exit(0)
def do_EOF(self, line):
sys.exit(0)
| aheck/reflectrpc | reflectrpc/rpcsh.py | Python | mit | 10,259 |
# -*- coding: utf-8 -*-
import datetime
from ..core.model import DeclarativeBase
from sqlalchemy import Column, DateTime, Index, Integer, Numeric, Text
class AccountTransaction(DeclarativeBase):
__tablename__ = 'account_transactions'
id = Column(Integer, nullable=False, primary_key=True)
account_id = Column(Integer, nullable=False)
debit = Column(Numeric, nullable=True)
credit = Column(Numeric, nullable=True)
notes = Column(Text, nullable=True)
    created_at = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
Index('account_id_on_account_transactions', AccountTransaction.account_id)
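# Illustrative usage (assumes a configured SQLAlchemy session; values are
# hypothetical): session.add(AccountTransaction(account_id=1, credit=10,
# notes='initial deposit')); session.commit()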
| masom/doorbot-api-python | doorbot/models/account_transaction.py | Python | mit | 631 |
"""
Management for multiprocessing in varial.
WorkerPool creates child processes that do not daemonize. Therefore they can
spawn further children, which is handy to parallelize at many levels (e.g. run
parallel tools at every level of a complex directory structure). Nevertheless,
only a given number of workers runs at the same time, thus keeping the processes
from dead-locking resources.
Exceptions from the worker processes are passed to the host process.
When using this module it is important to watch memory consumption! Every
spawned process is copied in memory. It is best to load data within a worker,
not in the host process.
Example usage:
>>> import varial.multiproc
>>>
>>> my_input_list = [1,2,3,4]
>>> n_workers = min(varial.settings.max_num_processes, len(my_input_list))
>>>
>>> def my_square(num):
>>> return num*num
>>>
>>> with varial.multiproc.WorkerPool(n_workers) as pool:
>>> for res in pool.imap_unordered(my_square, my_input_list):
>>> print res
Note that in the example, the input and output data of the worker is just an
int. If large amounts of data need to be transferred, it is better to let the
worker store the data on disk and read it back in the host.
"""
import multiprocessing.pool
import settings
import sys
_cpu_semaphore = None
_traceback_printlock = None # this is never released (only print once)
pre_fork_cbs = []
pre_join_cbs = []
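# Hooks: callables appended to pre_fork_cbs run in the host process right
# before a WorkerPool forks its workers; callables in pre_join_cbs run right
# before the pool is closed and joined.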
############################## errors handling: catch and raise on hostside ###
def _catch_exception_in_worker(func, *args, **kws):
try:
res = func(*args, **kws)
# TODO: write SIGINT handler and kill again
except KeyboardInterrupt as e:
res = 'Exception', e
except Exception as e:
res = 'Exception', e
if _traceback_printlock.acquire(block=False):
import traceback
tb = ''.join(traceback.format_exception(*sys.exc_info()))
print '='*80
print 'EXCEPTION IN PARALLEL EXECUTION START'
print '='*80
print tb
print '='*80
print 'EXCEPTION IN PARALLEL EXECUTION END'
print '='*80
return res
def _gen_raise_exception_in_host(iterator):
for i in iterator:
if isinstance(i, tuple) and i and i[0] == 'Exception':
raise i[1]
else:
yield i
def _exec_in_worker(func_and_item):
"""parallel execution with cpu control and exception catching."""
with _cpu_semaphore:
return _catch_exception_in_worker(*func_and_item)
################################ special worker-pool to allow for recursion ###
class NoDaemonProcess(multiprocessing.Process):
# make 'daemon' attribute always return False
def _get_daemon(self):
return False
def _set_daemon(self, value):
pass
daemon = property(_get_daemon, _set_daemon)
def run(self):
try:
super(NoDaemonProcess, self).run()
except (KeyboardInterrupt, IOError):
exit(-1)
class WorkerPool(multiprocessing.pool.Pool):
Process = NoDaemonProcess
def __init__(self, *args, **kws):
global _cpu_semaphore, _traceback_printlock
        # prepare parallelism (only once for all processes)
self.me_created_semaphore = False
if _cpu_semaphore:
# process with pool is supposed to be waiting a lot
_cpu_semaphore.release()
else:
self.me_created_semaphore = True
n_procs = settings.max_num_processes
_cpu_semaphore = multiprocessing.BoundedSemaphore(n_procs)
_traceback_printlock = multiprocessing.RLock()
for func in pre_fork_cbs:
func()
# go parallel
super(WorkerPool, self).__init__(*args, **kws)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
self.join()
def imap_unordered(self, func, iterable, chunksize=1):
iterable = ((func, i) for i in iterable)
res = super(WorkerPool, self).imap_unordered(
_exec_in_worker, iterable, chunksize
)
res = _gen_raise_exception_in_host(res)
return res
def close(self):
global _cpu_semaphore, _traceback_printlock
for func in pre_join_cbs:
func()
if self.me_created_semaphore:
_cpu_semaphore = None
_traceback_printlock = None
else:
# must re-acquire before leaving
_cpu_semaphore.acquire()
super(WorkerPool, self).close()
| HeinerTholen/Varial | varial/multiproc.py | Python | gpl-3.0 | 4,580 |
from robot.api import deco
def passing_handler(*args):
for arg in args:
print arg,
return ', '.join(args)
def failing_handler(*args):
if len(args) == 0:
msg = 'Failure'
else:
msg = 'Failure: %s' % ' '.join(args)
raise AssertionError(msg)
class GetKeywordNamesLibrary:
def __init__(self):
self.this_is_not_keyword = 'This is just an attribute!!'
def get_keyword_names(self):
marked_keywords = [name for name in dir(self) if hasattr(getattr(self, name), 'robot_name')]
other_keywords = ['Get Keyword That Passes', 'Get Keyword That Fails',
'keyword_in_library_itself', 'non_existing_kw',
'this_is_not_keyword']
return marked_keywords + other_keywords
def __getattr__(self, name):
if name == 'Get Keyword That Passes':
return passing_handler
if name == 'Get Keyword That Fails':
return failing_handler
raise AttributeError("Non-existing keyword '%s'" % name)
def keyword_in_library_itself(self):
msg = 'No need for __getattr__ here!!'
print msg
return msg
@deco.keyword('Name Set Using Robot Name Attribute')
def name_set_in_method_signature(self):
pass
@deco.keyword
def keyword_name_should_not_change(self):
pass
@deco.keyword('Add ${count} Copies Of ${item} To Cart')
def add_copies_to_cart(self, count, item):
return count, item
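# Illustrative use from a test case via embedded arguments: the step
# "Add 7 Copies Of Coffee To Cart" would invoke
# add_copies_to_cart(count='7', item='Coffee').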
| xiaokeng/robotframework | atest/testresources/testlibs/GetKeywordNamesLibrary.py | Python | apache-2.0 | 1,526 |
import django
from django.conf import settings
from django.contrib.admin.util import lookup_field, display_for_field
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.encoding import smart_unicode, force_unicode
from django.template import Library
from django.contrib.admin.templatetags.admin_list import _boolean_icon, result_headers
if django.VERSION >= (1, 2, 3):
from django.contrib.admin.templatetags.admin_list import result_hidden_fields
else:
result_hidden_fields = lambda cl: []
register = Library()
MPTT_ADMIN_LEVEL_INDENT = getattr(settings, 'MPTT_ADMIN_LEVEL_INDENT', 10)
IS_GRAPPELLI_INSTALLED = True if 'grappelli' in settings.INSTALLED_APPS else False
###
# Ripped from contrib.admin's (1.3.1) items_for_result tag.
# The only difference is we're indenting nodes according to their level.
def mptt_items_for_result(cl, result, form):
"""
Generates the actual list of data.
"""
first = True
pk = cl.lookup_opts.pk.attname
##### MPTT ADDITION START
# figure out which field to indent
mptt_indent_field = getattr(cl.model_admin, 'mptt_indent_field', None)
if not mptt_indent_field:
for field_name in cl.list_display:
try:
f = cl.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
if mptt_indent_field is None:
attr = getattr(result, field_name, None)
if callable(attr):
# first callable field, use this if we can't find any model fields
mptt_indent_field = field_name
else:
# first model field, use this one
mptt_indent_field = field_name
break
##### MPTT ADDITION END
# figure out how much to indent
mptt_level_indent = getattr(cl.model_admin, 'mptt_level_indent', MPTT_ADMIN_LEVEL_INDENT)
for field_name in cl.list_display:
row_class = ''
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except (AttributeError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
if f is None:
if field_name == u'action_checkbox':
row_class = ' class="action-checkbox"'
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = _boolean_icon(value)
else:
result_repr = smart_unicode(value)
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if not allow_tags:
result_repr = escape(result_repr)
else:
result_repr = mark_safe(result_repr)
else:
if isinstance(f.rel, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = EMPTY_CHANGELIST_VALUE
else:
result_repr = escape(field_val)
else:
result_repr = display_for_field(value, f)
if isinstance(f, models.DateField)\
or isinstance(f, models.TimeField)\
or isinstance(f, models.ForeignKey):
row_class = ' class="nowrap"'
if force_unicode(result_repr) == '':
                result_repr = mark_safe(' ')
##### MPTT ADDITION START
if field_name == mptt_indent_field:
level = getattr(result, result._mptt_meta.level_attr)
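            # With the default MPTT_ADMIN_LEVEL_INDENT of 10, a node at
            # level 3, for example, gets padding-left:35px (5 + 10 * 3).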
padding_attr = ' style="padding-left:%spx"' % (5 + mptt_level_indent * level)
else:
padding_attr = ''
##### MPTT ADDITION END
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True: 'th', False: 'td'}[first]
first = False
url = cl.url_for_result(result)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = repr(force_unicode(value))[1:]
##### MPTT SUBSTITUTION START
yield mark_safe(u'<%s%s%s><a href="%s"%s>%s</a></%s>' % \
(table_tag, row_class, padding_attr, url, (cl.is_popup and ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id or ''), conditional_escape(result_repr), table_tag))
##### MPTT SUBSTITUTION END
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))
else:
result_repr = conditional_escape(result_repr)
##### MPTT SUBSTITUTION START
yield mark_safe(u'<td%s%s>%s</td>' % (row_class, padding_attr, result_repr))
##### MPTT SUBSTITUTION END
if form and not form[cl.model._meta.pk.name].is_hidden:
yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name]))
def mptt_results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield list(mptt_items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield list(mptt_items_for_result(cl, res, None))
def mptt_result_list(cl):
"""
Displays the headers and data list together
"""
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': list(result_headers(cl)),
'results': list(mptt_results(cl))}
# custom template is merely so we can strip out sortable-ness from the column headers
# Based on admin/change_list_results.html (1.3.1)
if IS_GRAPPELLI_INSTALLED:
mptt_result_list = register.inclusion_tag("admin/grappelli_mptt_change_list_results.html")(mptt_result_list)
else:
mptt_result_list = register.inclusion_tag("admin/mptt_change_list_results.html")(mptt_result_list)
| zzzombat/lucid-python-django-mptt | mptt/templatetags/mptt_admin.py | Python | mit | 7,085 |
"""Code for finding content."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import collections
import os
from ... import types as t
from ...util import (
ANSIBLE_SOURCE_ROOT,
)
from .. import (
PathProvider,
)
class Layout:
"""Description of content locations and helper methods to access content."""
def __init__(self,
root, # type: str
paths, # type: t.List[str]
): # type: (...) -> None
self.root = root
self.__paths = paths # contains both file paths and symlinked directory paths (ending with os.path.sep)
self.__files = [path for path in paths if not path.endswith(os.path.sep)] # contains only file paths
self.__paths_tree = paths_to_tree(self.__paths)
self.__files_tree = paths_to_tree(self.__files)
def all_files(self, include_symlinked_directories=False): # type: (bool) -> t.List[str]
"""Return a list of all file paths."""
if include_symlinked_directories:
return self.__paths
return self.__files
def walk_files(self, directory, include_symlinked_directories=False): # type: (str, bool) -> t.List[str]
"""Return a list of file paths found recursively under the given directory."""
if include_symlinked_directories:
tree = self.__paths_tree
else:
tree = self.__files_tree
parts = directory.rstrip(os.sep).split(os.sep)
item = get_tree_item(tree, parts)
if not item:
return []
directories = collections.deque(item[0].values())
files = list(item[1])
while directories:
item = directories.pop()
directories.extend(item[0].values())
files.extend(item[1])
return files
def get_dirs(self, directory): # type: (str) -> t.List[str]
"""Return a list directory paths found directly under the given directory."""
parts = directory.rstrip(os.sep).split(os.sep)
item = get_tree_item(self.__files_tree, parts)
return [os.path.join(directory, key) for key in item[0].keys()] if item else []
def get_files(self, directory): # type: (str) -> t.List[str]
"""Return a list of file paths found directly under the given directory."""
parts = directory.rstrip(os.sep).split(os.sep)
item = get_tree_item(self.__files_tree, parts)
return item[1] if item else []
class ContentLayout(Layout):
"""Information about the current Ansible content being tested."""
def __init__(self,
root, # type: str
paths, # type: t.List[str]
plugin_paths, # type: t.Dict[str, str]
collection=None, # type: t.Optional[CollectionDetail]
integration_path=None, # type: t.Optional[str]
unit_path=None, # type: t.Optional[str]
unit_module_path=None, # type: t.Optional[str]
unit_module_utils_path=None, # type: t.Optional[str]
): # type: (...) -> None
super(ContentLayout, self).__init__(root, paths)
self.plugin_paths = plugin_paths
self.collection = collection
self.integration_path = integration_path
self.integration_targets_path = os.path.join(integration_path, 'targets')
self.integration_vars_path = os.path.join(integration_path, 'integration_config.yml')
self.unit_path = unit_path
self.unit_module_path = unit_module_path
self.unit_module_utils_path = unit_module_utils_path
self.is_ansible = root == ANSIBLE_SOURCE_ROOT
@property
def prefix(self): # type: () -> str
"""Return the collection prefix or an empty string if not a collection."""
if self.collection:
return self.collection.prefix
return ''
@property
def module_path(self): # type: () -> t.Optional[str]
"""Return the path where modules are found, if any."""
return self.plugin_paths.get('modules')
@property
def module_utils_path(self): # type: () -> t.Optional[str]
"""Return the path where module_utils are found, if any."""
return self.plugin_paths.get('module_utils')
@property
def module_utils_powershell_path(self): # type: () -> t.Optional[str]
"""Return the path where powershell module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'powershell')
return self.plugin_paths.get('module_utils')
@property
def module_utils_csharp_path(self): # type: () -> t.Optional[str]
"""Return the path where csharp module_utils are found, if any."""
if self.is_ansible:
return os.path.join(self.plugin_paths['module_utils'], 'csharp')
return self.plugin_paths.get('module_utils')
class CollectionDetail:
"""Details about the layout of the current collection."""
def __init__(self,
name, # type: str
namespace, # type: str
root, # type: str
): # type: (...) -> None
self.name = name
self.namespace = namespace
self.root = root
self.full_name = '%s.%s' % (namespace, name)
self.prefix = '%s.' % self.full_name
self.directory = os.path.join('ansible_collections', namespace, name)
class LayoutProvider(PathProvider):
"""Base class for layout providers."""
PLUGIN_TYPES = (
'action',
'become',
'cache',
'callback',
'cliconf',
'connection',
'doc_fragments',
'filter',
'httpapi',
'inventory',
'lookup',
'module_utils',
'modules',
'netconf',
'shell',
'strategy',
'terminal',
'test',
'vars',
)
@abc.abstractmethod
def create(self, root, paths): # type: (str, t.List[str]) -> ContentLayout
"""Create a layout using the given root and paths."""
def paths_to_tree(paths): # type: (t.List[str]) -> t.Tuple[t.Dict[str, t.Any], t.List[str]]
"""Return a filesystem tree from the given list of paths."""
tree = {}, []
for path in paths:
parts = path.split(os.sep)
root = tree
for part in parts[:-1]:
if part not in root[0]:
root[0][part] = {}, []
root = root[0][part]
root[1].append(path)
return tree
def get_tree_item(tree, parts): # type: (t.Tuple[t.Dict[str, t.Any], t.List[str]], t.List[str]) -> t.Optional[t.Tuple[t.Dict[str, t.Any], t.List[str]]]
"""Return the portion of the tree found under the path given by parts, or None if it does not exist."""
root = tree
for part in parts:
root = root[0].get(part)
if not root:
return None
return root
| amenonsen/ansible | test/lib/ansible_test/_internal/provider/layout/__init__.py | Python | gpl-3.0 | 6,980 |
#!/usr/bin/env python3
from person import Person, Manager
bob = Person('Bob Smith')
sue = Person('Sue Jones', job='dev', pay=100000)
tom = Manager('Tom Jones', 50000)
import shelve
db = shelve.open('persondb')
for obj in (bob, sue, tom):
db[obj.name] = obj
db.close()
import glob
print(glob.glob('person*'))
print(open('persondb.dir').read())
print(open('persondb.dat', 'rb').read())
db = shelve.open('persondb')
print(len(db))
print(list(db.keys()))
bob = db['Bob Smith']
print(bob)
print(bob.last_name())
for key in db:
print(key, '=>', db[key])
for key in sorted(db):
print(key, '=>', db[key])
sue = db['Sue Jones']
sue.give_raise(.1)
db['Sue Jones'] = sue
db.close()
db = shelve.open('persondb')
for key in sorted(db):
print(key, '=>', db[key])
db.close()
| eroicaleo/LearningPython | ch28/makedb.py | Python | mit | 791 |
"""
Copyright 2016 Christian Fobel
This file is part of command_plugin.
command_plugin is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
command_plugin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License
along with command_plugin. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
import gobject
import zmq
from .plugin import CommandZmqPlugin
from ...app_context import get_hub_uri
from ...plugin_manager import (PluginGlobals, SingletonPlugin, IPlugin,
implements)
logger = logging.getLogger(__name__)
PluginGlobals.push_env('microdrop')
class CommandPlugin(SingletonPlugin):
"""
This class is automatically registered with the PluginManager.
"""
implements(IPlugin)
plugin_name = 'wheelerlab.command_plugin'
def __init__(self):
self.name = self.plugin_name
self.plugin = None
self.command_timeout_id = None
def on_plugin_enable(self):
"""
Handler called once the plugin instance is enabled.
Note: if you inherit your plugin from AppDataController and don't
implement this handler, by default, it will automatically load all
        app options from the config file. If you decide to override the
default handler, you should call:
AppDataController.on_plugin_enable(self)
to retain this functionality.
"""
self.cleanup()
self.plugin = CommandZmqPlugin(self, self.name, get_hub_uri())
# Initialize sockets.
self.plugin.reset()
def check_command_socket():
try:
msg_frames = (self.plugin.command_socket
.recv_multipart(zmq.NOBLOCK))
except zmq.Again:
pass
else:
self.plugin.on_command_recv(msg_frames)
return True
self.command_timeout_id = gobject.timeout_add(10, check_command_socket)
def cleanup(self):
if self.command_timeout_id is not None:
gobject.source_remove(self.command_timeout_id)
if self.plugin is not None:
self.plugin = None
def on_plugin_disable(self):
"""
Handler called once the plugin instance is disabled.
"""
self.cleanup()
def on_app_exit(self):
"""
Handler called just before the MicroDrop application exits.
"""
self.cleanup()
PluginGlobals.pop_env()
| ryanfobel/microdrop | microdrop/core_plugins/command_plugin/microdrop_plugin.py | Python | gpl-3.0 | 2,862 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Various utilities for WebJournal, e.g. config parser, etc.
"""
import time
import datetime
import calendar
import re
import os
import cPickle
import math
import urllib
from MySQLdb import OperationalError
from xml.dom import minidom
from urlparse import urlparse
from invenio.config import \
CFG_ETCDIR, \
CFG_SITE_URL, \
CFG_CACHEDIR, \
CFG_SITE_LANG, \
CFG_ACCESS_CONTROL_LEVEL_SITE, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_DEVEL_SITE
from invenio.dbquery import run_sql
from invenio.bibformat_engine import BibFormatObject
from invenio.search_engine import search_pattern, record_exists
from invenio.messages import gettext_set_language
from invenio.errorlib import register_exception
from invenio.urlutils import make_invenio_opener
WEBJOURNAL_OPENER = make_invenio_opener('WebJournal')
########################### REGULAR EXPRESSIONS ######################
header_pattern = re.compile('<p\s*(align=justify)??>\s*<strong>(?P<header>.*?)</strong>\s*</p>')
header_pattern2 = re.compile('<p\s*(class="articleHeader").*?>(?P<header>.*?)</p>')
para_pattern = re.compile('<p.*?>(?P<paragraph>.+?)</p>', re.DOTALL)
img_pattern = re.compile('<img.*?src=("|\')?(?P<image>\S+?)("|\'|\s).*?/>', re.DOTALL)
image_pattern = re.compile(r'''
(<a\s*href=["']?(?P<hyperlink>\S*)["']?>)?# get the link location for the image
\s*# after each tag we can have arbitrary whitespaces
<center># the image is always centered
\s*
<img\s*(class=["']imageScale["'])*?\s*src=(?P<image>\S*)\s*border=1\s*(/)?># getting the image itself
\s*
</center>
\s*
(</a>)?
(<br />|<br />|<br/>)*# the caption can be separated by any nr of line breaks
(
<b>
\s*
<i>
\s*
<center>(?P<caption>.*?)</center># getting the caption
\s*
</i>
\s*
</b>
)?''', re.DOTALL | re.VERBOSE | re.IGNORECASE )
#'
############################## FEATURED RECORDS ######################
def get_featured_records(journal_name):
"""
Returns the 'featured' records i.e. records chosen to be displayed
with an image on the main page, in the widgets section, for the
given journal.
parameter:
journal_name - (str) the name of the journal for which we want
to get the featured records
returns:
list of tuples (recid, img_url)
"""
try:
feature_file = open('%s/webjournal/%s/featured_record' % \
(CFG_ETCDIR, journal_name))
except:
return []
records = feature_file.readlines()
return [(record.split('---', 1)[0], record.split('---', 1)[1]) \
for record in records if "---" in record]
def add_featured_record(journal_name, recid, img_url):
"""
Adds the given record to the list of featured records of the given
journal.
parameters:
journal_name - (str) the name of the journal to which the record
should be added.
recid - (int) the record id of the record to be featured.
img_url - (str) a url to an image icon displayed along the
featured record.
returns:
0 if everything went ok
1 if record is already in the list
2 if other problems
"""
# Check that record is not already there
featured_records = get_featured_records(journal_name)
for featured_recid, featured_img in featured_records:
if featured_recid == str(recid):
return 1
try:
fptr = open('%s/webjournal/%s/featured_record'
% (CFG_ETCDIR, journal_name), "a")
fptr.write(str(recid) + '---' + img_url + '\n')
fptr.close()
except:
return 2
return 0
def remove_featured_record(journal_name, recid):
"""
Removes the given record from the list of featured records of the
given journal.
parameters:
journal_name - (str) the name of the journal to which the record
should be added.
recid - (int) the record id of the record to be featured.
"""
featured_records = get_featured_records(journal_name)
try:
fptr = open('%s/webjournal/%s/featured_record'
% (CFG_ETCDIR, journal_name), "w")
for featured_recid, featured_img in featured_records:
if str(featured_recid) != str(recid):
fptr.write(str(featured_recid) + '---' + featured_img + \
'\n')
fptr.close()
except:
return 1
return 0
############################ ARTICLES RELATED ########################
def get_order_dict_from_recid_list(recids, journal_name, issue_number,
newest_first=False,
newest_only=False):
"""
Returns the ordered list of input recids, for given
'issue_number'.
Since there might be several articles at the same position, the
returned structure is a dictionary with keys being order number
indicated in record metadata, and values being list of recids for
this order number (recids for one position are ordered from
highest to lowest recid).
Eg: {'1': [2390, 2386, 2385],
'3': [2388],
'2': [2389],
'4': [2387]}
Parameters:
recids - a list of all recid's that should be brought
into order
journal_name - the name of the journal
issue_number - *str* the issue_number for which we are
deriving the order
newest_first - *bool* if True, new articles should be placed
at beginning of the list. If so, their
position/order will be negative integers
newest_only - *bool* if only new articles should be returned
Returns:
ordered_records: a dictionary with the recids ordered by
keys
"""
ordered_records = {}
ordered_new_records = {}
records_without_defined_order = []
new_records_without_defined_order = []
for record in recids:
temp_rec = BibFormatObject(record)
articles_info = temp_rec.fields('773__')
for article_info in articles_info:
if article_info.get('n', '') == issue_number or \
'0' + article_info.get('n', '') == issue_number:
if article_info.has_key('c') and \
article_info['c'].isdigit():
order_number = int(article_info.get('c', ''))
if (newest_first or newest_only) and \
is_new_article(journal_name, issue_number, record):
if ordered_new_records.has_key(order_number):
ordered_new_records[order_number].append(record)
else:
ordered_new_records[order_number] = [record]
elif not newest_only:
if ordered_records.has_key(order_number):
ordered_records[order_number].append(record)
else:
ordered_records[order_number] = [record]
else:
# No order? No problem! Append it at the end.
if newest_first and is_new_article(journal_name, issue_number, record):
new_records_without_defined_order.append(record)
elif not newest_only:
records_without_defined_order.append(record)
# Append records without order at the end of the list
if records_without_defined_order:
if ordered_records:
ordered_records[max(ordered_records.keys()) + 1] = records_without_defined_order
else:
ordered_records[1] = records_without_defined_order
# Append new records without order at the end of the list of new
# records
if new_records_without_defined_order:
if ordered_new_records:
ordered_new_records[max(ordered_new_records.keys()) + 1] = new_records_without_defined_order
else:
ordered_new_records[1] = new_records_without_defined_order
# Append new records at the beginning of the list of 'old'
# records. To do so, use negative integers
if ordered_new_records:
highest_new_record_order = max(ordered_new_records.keys())
for order, new_records in ordered_new_records.iteritems():
ordered_records[- highest_new_record_order + order - 1] = new_records
for (order, records) in ordered_records.iteritems():
# Reverse so that if there are several articles at same
# positon, newest appear first
records.reverse()
return ordered_records
def get_journal_articles(journal_name, issue, category,
newest_first=False, newest_only=False):
"""
Returns the recids in given category and journal, for given issue
number. The returned recids are grouped according to their 773__c
field.
Example of returned value:
{'1': [2390, 2386, 2385],
'3': [2388],
'2': [2389],
'4': [2387]}
Parameters:
journal_name - *str* the name of the journal (as used in URLs)
issue - *str* the issue. Eg: "08/2007"
category - *str* the name of the category
newest_first - *bool* if True, new articles should be placed
at beginning of the list. If so, their
position/order will be negative integers
newest_only - *bool* if only new articles should be returned
"""
use_cache = True
current_issue = get_current_issue(CFG_SITE_LANG, journal_name)
if issue_is_later_than(issue, current_issue):
# If we are working on unreleased issue, do not use caching
# mechanism
use_cache = False
if use_cache:
cached_articles = _get_cached_journal_articles(journal_name, issue, category)
if cached_articles is not None:
ordered_articles = get_order_dict_from_recid_list(cached_articles,
journal_name,
issue,
newest_first,
newest_only)
return ordered_articles
# Retrieve the list of rules that map Category -> Search Pattern.
# Keep only the rule matching our category
config_strings = get_xml_from_config(["record/rule"], journal_name)
category_to_search_pattern_rules = config_strings["record/rule"]
try:
matching_rule = [rule.split(',', 1) for rule in \
category_to_search_pattern_rules \
if rule.split(',')[0] == category]
except:
return []
recids_issue = search_pattern(p='773__n:%s -980:DELETED' % issue)
recids_rule = search_pattern(p=matching_rule[0][1])
if issue[0] == '0':
# search for 09/ and 9/
recids_issue.union_update(search_pattern(p='773__n:%s -980:DELETED' % issue.lstrip('0')))
recids_rule.intersection_update(recids_issue)
recids = [recid for recid in recids_rule if record_exists(recid) == 1]
if use_cache:
_cache_journal_articles(journal_name, issue, category, recids)
ordered_articles = get_order_dict_from_recid_list(recids,
journal_name,
issue,
newest_first,
newest_only)
return ordered_articles
def _cache_journal_articles(journal_name, issue, category, articles):
"""
Caches given articles IDs.
"""
journal_cache_path = get_journal_article_cache_path(journal_name,
issue)
try:
journal_cache_file = open(journal_cache_path, 'r')
journal_info = cPickle.load(journal_cache_file)
journal_cache_file.close()
except cPickle.PickleError, e:
journal_info = {}
except IOError:
journal_info = {}
except EOFError:
journal_info = {}
except ValueError:
journal_info = {}
if not journal_info.has_key('journal_articles'):
journal_info['journal_articles'] = {}
journal_info['journal_articles'][category] = articles
# Create cache directory if it does not exist
journal_cache_dir = os.path.dirname(journal_cache_path)
if not os.path.exists(journal_cache_dir):
try:
os.makedirs(journal_cache_dir)
except:
return False
journal_cache_file = open(journal_cache_path, 'w')
cPickle.dump(journal_info, journal_cache_file)
journal_cache_file.close()
return True
def _get_cached_journal_articles(journal_name, issue, category):
"""
Retrieve the articles IDs cached for this journal.
Returns None if cache does not exist or more than 5 minutes old
"""
# Check if our cache is more or less up-to-date (not more than 5
# minutes old)
try:
journal_cache_path = get_journal_article_cache_path(journal_name,
issue)
last_update = os.path.getctime(journal_cache_path)
    except Exception, e:
return None
now = time.time()
if (last_update + 5*60) < now:
return None
# Get from cache
try:
journal_cache_file = open(journal_cache_path, 'r')
journal_info = cPickle.load(journal_cache_file)
journal_articles = journal_info.get('journal_articles', {}).get(category, None)
journal_cache_file.close()
except cPickle.PickleError, e:
journal_articles = None
except IOError:
journal_articles = None
except EOFError:
journal_articles = None
except ValueError:
journal_articles = None
return journal_articles
def is_new_article(journal_name, issue, recid):
"""
Check if given article should be considered as new or not.
New articles are articles that have never appeared in older issues
than given one.
"""
article_found_in_older_issue = False
temp_rec = BibFormatObject(recid)
publication_blocks = temp_rec.fields('773__')
for publication_block in publication_blocks:
this_issue_number, this_issue_year = issue.split('/')
issue_number, issue_year = publication_block.get('n', '/').split('/', 1)
if int(issue_year) < int(this_issue_year):
# Found an older issue
article_found_in_older_issue = True
break
elif int(issue_year) == int(this_issue_year) and \
int(issue_number) < int(this_issue_number):
# Found an older issue
article_found_in_older_issue = True
break
return not article_found_in_older_issue
############################ CATEGORIES RELATED ######################
def get_journal_categories(journal_name, issue=None):
"""
List the categories for the given journal and issue.
Returns categories in same order as in config file.
Parameters:
journal_name - *str* the name of the journal (as used in URLs)
issue - *str* the issue. Eg:'08/2007'. If None, consider
all categories defined in journal config
"""
categories = []
current_issue = get_current_issue(CFG_SITE_LANG, journal_name)
config_strings = get_xml_from_config(["record/rule"], journal_name)
all_categories = [rule.split(',')[0] for rule in \
config_strings["record/rule"]]
if issue is None:
return all_categories
for category in all_categories:
recids = get_journal_articles(journal_name,
issue,
category)
if len(recids.keys()) > 0:
categories.append(category)
return categories
def get_category_query(journal_name, category):
"""
Returns the category definition for the given category and journal name
Parameters:
journal_name - *str* the name of the journal (as used in URLs)
        category - *str* a category name, as found in the XML config
"""
config_strings = get_xml_from_config(["record/rule"], journal_name)
category_to_search_pattern_rules = config_strings["record/rule"]
try:
matching_rule = [rule.split(',', 1)[1].strip() for rule in \
category_to_search_pattern_rules \
if rule.split(',')[0] == category]
except:
return None
return matching_rule[0]
######################### JOURNAL CONFIG VARS ######################
cached_parsed_xml_config = {}
def get_xml_from_config(nodes, journal_name):
"""
Returns values from the journal configuration file.
The needed values can be specified by node name, or by a hierarchy
of nodes names using '/' as character to mean 'descendant of'.
Eg. 'record/rule' to get all the values of 'rule' tags inside the
'record' node
Returns a dictionary with a key for each query and a list of
strings (innerXml) results for each key.
Has a special field "config_fetching_error" that returns an error when
something has gone wrong.
"""
# Get and open the config file
results = {}
if cached_parsed_xml_config.has_key(journal_name):
config_file = cached_parsed_xml_config[journal_name]
else:
config_path = '%s/webjournal/%s/%s-config.xml' % \
(CFG_ETCDIR, journal_name, journal_name)
config_file = minidom.Document
try:
config_file = minidom.parse("%s" % config_path)
except:
# todo: raise exception "error: no config file found"
results["config_fetching_error"] = "could not find config file"
return results
else:
cached_parsed_xml_config[journal_name] = config_file
for node_path in nodes:
node = config_file
for node_path_component in node_path.split('/'):
# pylint: disable=E1103
# The node variable can be rewritten in the loop and therefore
# its type can change.
if node != config_file and node.length > 0:
# We have a NodeList object: consider only first child
node = node.item(0)
# pylint: enable=E1103
try:
node = node.getElementsByTagName(node_path_component)
except:
# WARNING, config did not have such value
node = []
break
results[node_path] = []
for result in node:
try:
result_string = result.firstChild.toxml(encoding="utf-8")
except:
# WARNING, config did not have such value
continue
results[node_path].append(result_string)
return results
def get_journal_issue_field(journal_name):
"""
Returns the MARC field in which this journal expects to find
the issue number. Read this from the journal config file
Parameters:
journal_name - *str* the name of the journal (as used in URLs)
"""
config_strings = get_xml_from_config(["issue_number"], journal_name)
try:
issue_field = config_strings["issue_number"][0]
except:
issue_field = '773__n'
return issue_field
def get_journal_css_url(journal_name, type='screen'):
"""
Returns URL to this journal's CSS.
Parameters:
journal_name - *str* the name of the journal (as used in URLs)
type - *str* 'screen' or 'print', depending on the kind
of CSS
"""
config_strings = get_xml_from_config([type], journal_name)
css_path = ''
try:
        css_path = config_strings[type][0]
except Exception:
register_exception(req=None,
suffix="No css file for journal %s. Is this right?" % \
journal_name)
return CFG_SITE_URL + '/' + css_path
def get_journal_submission_params(journal_name):
"""
Returns the (doctype, identifier element, identifier field) for
the submission of articles in this journal, so that it is possible
to build direct submission links.
Parameter:
journal_name - *str* the name of the journal (as used in URLs)
"""
doctype = ''
identifier_field = ''
identifier_element = ''
config_strings = get_xml_from_config(["submission/doctype"], journal_name)
if config_strings.get('submission/doctype', ''):
doctype = config_strings['submission/doctype'][0]
config_strings = get_xml_from_config(["submission/identifier_element"], journal_name)
if config_strings.get('submission/identifier_element', ''):
identifier_element = config_strings['submission/identifier_element'][0]
config_strings = get_xml_from_config(["submission/identifier_field"], journal_name)
if config_strings.get('submission/identifier_field', ''):
identifier_field = config_strings['submission/identifier_field'][0]
else:
identifier_field = '037__a'
return (doctype, identifier_element, identifier_field)
def get_journal_draft_keyword_to_remove(journal_name):
"""
Returns the keyword that should be removed from the article
metadata in order to move the article from Draft to Ready
"""
config_strings = get_xml_from_config(["draft_keyword"], journal_name)
if config_strings.get('draft_keyword', ''):
return config_strings['draft_keyword'][0]
return ''
def get_journal_alert_sender_email(journal_name):
"""
    Returns the email address that should be used as the sender of the alert
email.
If not specified, use CFG_SITE_SUPPORT_EMAIL
"""
config_strings = get_xml_from_config(["alert_sender"], journal_name)
if config_strings.get('alert_sender', ''):
return config_strings['alert_sender'][0]
return CFG_SITE_SUPPORT_EMAIL
def get_journal_alert_recipient_email(journal_name):
"""
    Returns the default email address(es) of the recipients of the alert
    email, as a string of comma-separated emails.
"""
if CFG_DEVEL_SITE:
# To be on the safe side, do not return the default alert recipients.
return ''
config_strings = get_xml_from_config(["alert_recipients"], journal_name)
if config_strings.get('alert_recipients', ''):
return config_strings['alert_recipients'][0]
return ''
def get_journal_collection_to_refresh_on_release(journal_name):
"""
    Returns the list of collections to update (WebColl) upon release of
an issue.
"""
from invenio.search_engine import collection_reclist_cache
config_strings = get_xml_from_config(["update_on_release/collection"], journal_name)
return [coll for coll in config_strings.get('update_on_release/collection', []) if \
collection_reclist_cache.cache.has_key(coll)]
def get_journal_index_to_refresh_on_release(journal_name):
"""
    Returns the list of indexes to update (BibIndex) upon release of
an issue.
"""
from invenio.bibindex_engine import get_index_id_from_index_name
config_strings = get_xml_from_config(["update_on_release/index"], journal_name)
return [index for index in config_strings.get('update_on_release/index', []) if \
get_index_id_from_index_name(index) != '']
def get_journal_template(template, journal_name, ln=CFG_SITE_LANG):
"""
    Returns the journal template name for the given template type.
    Raises an exception if the template cannot be found.
"""
from invenio.webjournal_config import \
InvenioWebJournalTemplateNotFoundError
config_strings = get_xml_from_config([template], journal_name)
try:
index_page_template = 'webjournal' + os.sep + \
config_strings[template][0]
except:
raise InvenioWebJournalTemplateNotFoundError(ln,
journal_name,
template)
return index_page_template
def get_journal_name_intl(journal_name, ln=CFG_SITE_LANG):
"""
Returns the nice name of the journal, translated if possible
"""
_ = gettext_set_language(ln)
config_strings = get_xml_from_config(["niceName"], journal_name)
if config_strings.get('niceName', ''):
return _(config_strings['niceName'][0])
return ''
def get_journal_languages(journal_name):
"""
Returns the list of languages defined for this journal
"""
config_strings = get_xml_from_config(["languages"], journal_name)
if config_strings.get('languages', ''):
return [ln.strip() for ln in \
config_strings['languages'][0].split(',')]
return []
def get_journal_issue_grouping(journal_name):
"""
    Returns the number of issues that are typically released at the
    same time.
    This is used if, for example, every two weeks you release an issue
    that should contain the issues of the next 2 weeks (e.g. at week 16,
    you release an issue named '16-17/2009').
    This number should help the admin interface guess how to release
    the next issue (can be overridden by the user).
"""
config_strings = get_xml_from_config(["issue_grouping"], journal_name)
if config_strings.get('issue_grouping', ''):
issue_grouping = config_strings['issue_grouping'][0]
if issue_grouping.isdigit() and int(issue_grouping) > 0:
return int(issue_grouping)
return 1
def get_journal_nb_issues_per_year(journal_name):
"""
Returns the default number of issues per year for this journal.
    This number should help the admin interface guess the next
    issue number (can be overridden by the user).
"""
config_strings = get_xml_from_config(["issues_per_year"], journal_name)
if config_strings.get('issues_per_year', ''):
issues_per_year = config_strings['issues_per_year'][0]
if issues_per_year.isdigit() and int(issues_per_year) > 0:
return int(issues_per_year)
return 52
def get_journal_preferred_language(journal_name, ln):
"""
    Returns the most appropriate language in which to display the
    journal, given the requested language.
"""
languages = get_journal_languages(journal_name)
if ln in languages:
return ln
elif CFG_SITE_LANG in languages:
return CFG_SITE_LANG
elif languages:
        return languages[0]
else:
return CFG_SITE_LANG
def get_unreleased_issue_hiding_mode(journal_name):
"""
    Returns how unreleased issues should be treated. Can be one of the
    following string values:
        'future' - only future unreleased issues are hidden. Past
                   unreleased ones can be viewed
        'all'    - all unreleased issues (past and future) are hidden
        'none'   - no unreleased issue is hidden
"""
config_strings = get_xml_from_config(["hide_unreleased_issues"], journal_name)
if config_strings.get('hide_unreleased_issues', ''):
hide_unreleased_issues = config_strings['hide_unreleased_issues'][0]
if hide_unreleased_issues in ['future', 'all', 'none']:
return hide_unreleased_issues
return 'all'
def get_first_issue_from_config(journal_name):
"""
    Returns the first issue as defined in the config. This should only
    be useful when no issues have been released yet.
If not specified, returns the issue made of current week number
and year.
"""
config_strings = get_xml_from_config(["first_issue"], journal_name)
if config_strings.has_key('first_issue'):
return config_strings['first_issue'][0]
return time.strftime("%W/%Y", time.localtime())
######################## TIME / ISSUE FUNCTIONS ######################
def get_current_issue(ln, journal_name):
"""
Returns the current issue of a journal as a string.
Current issue is the latest released issue.
"""
journal_id = get_journal_id(journal_name, ln)
try:
current_issue = run_sql("""SELECT issue_number
FROM jrnISSUE
WHERE date_released <= NOW()
AND id_jrnJOURNAL=%s
ORDER BY date_released DESC
LIMIT 1""",
(journal_id,))[0][0]
except:
# start the first journal ever
current_issue = get_first_issue_from_config(journal_name)
run_sql("""INSERT INTO jrnISSUE (id_jrnJOURNAL, issue_number, issue_display)
VALUES(%s, %s, %s)""",
(journal_id,
current_issue,
current_issue))
return current_issue
def get_all_released_issues(journal_name):
"""
    Returns the list of released issues, ordered by release date.
    Note that it only includes the issues that are considered as
    released in the DB: it will not, for example, include articles
    that have been imported into the system but not yet released.
"""
journal_id = get_journal_id(journal_name)
res = run_sql("""SELECT issue_number
FROM jrnISSUE
WHERE id_jrnJOURNAL = %s
AND UNIX_TIMESTAMP(date_released) != 0
ORDER BY date_released DESC""",
(journal_id,))
if res:
return [row[0] for row in res]
else:
return []
def get_next_journal_issues(current_issue_number, journal_name, n=2):
"""
This function suggests the 'n' next issue numbers
"""
number, year = current_issue_number.split('/', 1)
number = int(number)
year = int(year)
number_issues_per_year = get_journal_nb_issues_per_year(journal_name)
next_issues = [make_issue_number(journal_name,
((number - 1 + i) % (number_issues_per_year)) + 1,
year + ((number - 1 + i) / number_issues_per_year)) \
for i in range(1, n + 1)]
return next_issues
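# Worked example (hypothetical journal configured with 52 issues per
# year): starting from current issue '51/2008', the two next issues are
#   i=1: ((51 - 1 + 1) % 52) + 1 = 52, year 2008 + (51 / 52) = 2008 -> '52/2008'
#   i=2: ((51 - 1 + 2) % 52) + 1 = 1,  year 2008 + (52 / 52) = 2009 -> '01/2009'
# i.e. get_next_journal_issues('51/2008', 'AtlantisTimes', n=2) would
# return ['52/2008', '01/2009'].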
def get_grouped_issues(journal_name, issue_number):
"""
Returns all the issues grouped with a given one.
Issues are sorted from the oldest to newest one.
"""
grouped_issues = []
journal_id = get_journal_id(journal_name, CFG_SITE_LANG)
issue_display = get_issue_number_display(issue_number, journal_name)
res = run_sql("""SELECT issue_number
FROM jrnISSUE
WHERE id_jrnJOURNAL=%s AND issue_display=%s""",
(journal_id,
issue_display))
if res:
grouped_issues = [row[0] for row in res]
grouped_issues.sort(compare_issues)
return grouped_issues
def compare_issues(issue1, issue2):
"""
Comparison function for issues.
Returns:
-1 if issue1 is older than issue2
0 if issues are equal
1 if issue1 is newer than issue2
"""
issue1_number, issue1_year = issue1.split('/', 1)
issue2_number, issue2_year = issue2.split('/', 1)
if int(issue1_year) == int(issue2_year):
return cmp(int(issue1_number), int(issue2_number))
else:
return cmp(int(issue1_year), int(issue2_year))
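# Illustrative examples for the comparison helper above (issues are
# 'number/year' strings):
#   compare_issues('02/2009', '52/2008')  ->  1   ('02/2009' is newer)
#   compare_issues('16/2009', '16/2009')  ->  0
#   sorted(['03/2009', '52/2008', '01/2009'], cmp=compare_issues)
#     -> ['52/2008', '01/2009', '03/2009']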
def issue_is_later_than(issue1, issue2):
"""
Returns true if issue1 is later than issue2
"""
issue_number1, issue_year1 = issue1.split('/', 1)
issue_number2, issue_year2 = issue2.split('/', 1)
if int(issue_year1) > int(issue_year2):
return True
elif int(issue_year1) == int(issue_year2):
return int(issue_number1) > int(issue_number2)
else:
return False
def get_issue_number_display(issue_number, journal_name,
ln=CFG_SITE_LANG):
"""
Returns the display string for a given issue number.
"""
journal_id = get_journal_id(journal_name, ln)
issue_display = run_sql("""SELECT issue_display
FROM jrnISSUE
WHERE issue_number=%s
AND id_jrnJOURNAL=%s""",
(issue_number, journal_id))
if issue_display:
return issue_display[0][0]
else:
# Not yet released...
return issue_number
def make_issue_number(journal_name, number, year, for_url_p=False):
"""
Creates a normalized issue number representation with given issue
number (as int or str) and year (as int or str).
    Reverses the year and number (i.e. 'year/number') if for_url_p is True.
"""
number_issues_per_year = get_journal_nb_issues_per_year(journal_name)
precision = len(str(number_issues_per_year))
number = int(str(number))
year = int(str(year))
if for_url_p:
return ("%i/%0" + str(precision) + "i") % \
(year, number)
else:
return ("%0" + str(precision) + "i/%i") % \
(number, year)
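# Illustrative examples (hypothetical journal configured with 52 issues
# per year, hence two-digit zero-padding of the issue number):
#   make_issue_number('AtlantisTimes', 2, 2009)                 -> '02/2009'
#   make_issue_number('AtlantisTimes', 2, 2009, for_url_p=True) -> '2009/02'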
def get_release_datetime(issue, journal_name, ln=CFG_SITE_LANG):
"""
Gets the date at which an issue was released from the DB.
Returns None if issue has not yet been released.
See issue_to_datetime() to get the *theoretical* release time of an
issue.
"""
journal_id = get_journal_id(journal_name, ln)
try:
release_date = run_sql("""SELECT date_released
FROM jrnISSUE
WHERE issue_number=%s
AND id_jrnJOURNAL=%s""",
(issue, journal_id))[0][0]
except:
return None
if release_date:
return release_date
else:
return None
def get_announcement_datetime(issue, journal_name, ln=CFG_SITE_LANG):
"""
Get the date at which an issue was announced through the alert system.
Return None if not announced
"""
journal_id = get_journal_id(journal_name, ln)
try:
announce_date = run_sql("""SELECT date_announced
FROM jrnISSUE
WHERE issue_number=%s
AND id_jrnJOURNAL=%s""",
(issue, journal_id))[0][0]
except:
return None
if announce_date:
return announce_date
else:
return None
def datetime_to_issue(issue_datetime, journal_name):
"""
Returns the issue corresponding to the given datetime object.
If issue_datetime is too far in the future or in the past, gives
the best possible matching issue, or None, if it does not seem to
exist.
#If issue_datetime is too far in the future, return the latest
#released issue.
#If issue_datetime is too far in the past, return None
Parameters:
issue_datetime - *datetime* date of the issue to be retrieved
journal_name - *str* the name of the journal (as used in URLs)
"""
issue_number = None
journal_id = get_journal_id(journal_name)
    # Try to discover how many days an issue is valid
nb_issues_per_year = get_journal_nb_issues_per_year(journal_name)
this_year_number_of_days = 365
if calendar.isleap(issue_datetime.year):
this_year_number_of_days = 366
issue_day_lifetime = math.ceil(float(this_year_number_of_days)/nb_issues_per_year)
res = run_sql("""SELECT issue_number, date_released
FROM jrnISSUE
WHERE date_released < %s
AND id_jrnJOURNAL = %s
ORDER BY date_released DESC LIMIT 1""",
(issue_datetime, journal_id))
if res and res[0][1]:
issue_number = res[0][0]
issue_release_date = res[0][1]
# Check that the result is not too far in the future:
if issue_release_date + datetime.timedelta(issue_day_lifetime) < issue_datetime:
# In principle, the latest issue will no longer be valid
# at that time
return None
else:
# Mmh, are we too far in the past? This can happen in the case
# of articles that have been imported in the system but never
# considered as 'released' in the database. So we should still
# try to approximate/match an issue:
if round(issue_day_lifetime) in [6, 7, 8]:
# Weekly issues. We can use this information to better
# match the issue number
issue_nb = int(issue_datetime.strftime('%W')) # = week number
else:
# Compute the number of days since beginning of year, and
# divide by the lifetime of an issue: we get the
# approximate issue_number
issue_nb = math.ceil((int(issue_datetime.strftime('%j')) / issue_day_lifetime))
issue_number = ("%0" + str(len(str(nb_issues_per_year)))+ "i/%i") % (issue_nb, issue_datetime.year)
# Now check if this issue exists in the system for this
# journal
if not get_journal_categories(journal_name, issue_number):
# This issue did not exist
return None
return issue_number
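# Illustrative example (hypothetical weekly journal, no issue released
# before the given date): datetime.datetime(2009, 4, 22) falls in week
# '16' (strftime '%W'), so the date would be approximated as issue
# '16/2009', which is returned only if get_journal_categories() finds
# content for that issue.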
DAILY = 1
WEEKLY = 2
MONTHLY = 3
def issue_to_datetime(issue_number, journal_name, granularity=None):
"""
Returns the *theoretical* date of release for given issue: useful
if you release on Friday, but the issue date of the journal
should correspond to the next Monday.
This will correspond to the next day/week/month, depending on the
number of issues per year (or the 'granularity' if specified) and
the release time (if close to the end of a period defined by the
granularity, consider next period since release is made a bit in
advance).
See get_release_datetime() for the *real* release time of an issue
    THIS FUNCTION SHOULD ONLY BE USED FOR INFORMATIVE DISPLAY PURPOSES,
    AS IT GIVES APPROXIMATE RESULTS. Do not use it to make decisions.
Parameters:
issue_number - *str* issue number to consider
journal_name - *str* the name of the journal (as used in URLs)
granularity - *int* the granularity to consider
"""
# If we have released, we can use this information. Otherwise we
# have to approximate.
issue_date = get_release_datetime(issue_number, journal_name)
if not issue_date:
# Approximate release date
number, year = issue_number.split('/')
number = int(number)
year = int(year)
nb_issues_per_year = get_journal_nb_issues_per_year(journal_name)
this_year_number_of_days = 365
if calendar.isleap(year):
this_year_number_of_days = 366
issue_day_lifetime = float(this_year_number_of_days)/nb_issues_per_year
# Compute from beginning of the year
issue_date = datetime.datetime(year, 1, 1) + \
datetime.timedelta(days=int(round((number - 1) * issue_day_lifetime)))
# Okay, but if last release is not too far in the past, better
# compute from the release.
current_issue = get_current_issue(CFG_SITE_LANG, journal_name)
current_issue_time = get_release_datetime(current_issue, journal_name)
if current_issue_time.year == issue_date.year:
current_issue_number, current_issue_year = current_issue.split('/')
current_issue_number = int(current_issue_number)
# Compute from last release
issue_date = current_issue_time + \
datetime.timedelta(days=int((number - current_issue_number) * issue_day_lifetime))
# If granularity is not specifed, deduce from config
if granularity is None:
nb_issues_per_year = get_journal_nb_issues_per_year(journal_name)
if nb_issues_per_year > 250:
granularity = DAILY
elif nb_issues_per_year > 40:
granularity = WEEKLY
else:
granularity = MONTHLY
# Now we can adapt the date to match the granularity
if granularity == DAILY:
if issue_date.hour >= 15:
# If released after 3pm, consider it is the issue of the next
# day
issue_date = issue_date + datetime.timedelta(days=1)
elif granularity == WEEKLY:
(year, week_nb, day_nb) = issue_date.isocalendar()
if day_nb > 4:
# If released on Fri, Sat or Sun, consider that it is next
# week's issue.
issue_date = issue_date + datetime.timedelta(weeks=1)
# Get first day of the week
issue_date = issue_date - datetime.timedelta(days=issue_date.weekday())
else:
if issue_date.day > 22:
# If released last week of the month, consider release for
# next month
issue_date = issue_date.replace(month=issue_date.month+1)
date_string = issue_date.strftime("%Y %m 1")
issue_date = datetime.datetime(*(time.strptime(date_string, "%Y %m %d")[0:6]))
return issue_date
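# Illustrative example (hypothetical weekly journal, WEEKLY
# granularity): if issue '25/2009' was released on Friday 2009-06-19,
# isocalendar() gives day_nb 5 > 4, so one week is added and the date
# is then snapped to the Monday of that following week:
#   issue_to_datetime('25/2009', 'AtlantisTimes') -> 2009-06-22 (Monday)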
def get_number_of_articles_for_issue(issue, journal_name, ln=CFG_SITE_LANG):
"""
Function that returns a dictionary with all categories and number of
articles in each category.
"""
all_articles = {}
categories = get_journal_categories(journal_name, issue)
for category in categories:
all_articles[category] = len(get_journal_articles(journal_name, issue, category))
return all_articles
########################## JOURNAL RELATED ###########################
def get_journal_info_path(journal_name):
"""
Returns the path to the info file of the given journal. The info
file should be used to get information about a journal when database
is not available.
Returns None if path cannot be determined
"""
# We must make sure we don't try to read outside of webjournal
# cache dir
info_path = os.path.abspath("%s/webjournal/%s/info.dat" % \
(CFG_CACHEDIR, journal_name))
if info_path.startswith(CFG_CACHEDIR + '/webjournal/'):
return info_path
else:
return None
def get_journal_article_cache_path(journal_name, issue):
"""
Returns the path to cache file of the articles of a given issue
Returns None if path cannot be determined
"""
# We must make sure we don't try to read outside of webjournal
# cache dir
cache_path = os.path.abspath("%s/webjournal/%s/%s_articles_cache.dat" % \
(CFG_CACHEDIR, journal_name,
issue.replace('/', '_')))
if cache_path.startswith(CFG_CACHEDIR + '/webjournal/'):
return cache_path
else:
return None
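# Illustrative example (hypothetical journal name and cache dir): with
# CFG_CACHEDIR = '/opt/invenio/var/cache',
#   get_journal_article_cache_path('AtlantisTimes', '03/2009')
# returns '/opt/invenio/var/cache/webjournal/AtlantisTimes/03_2009_articles_cache.dat'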
def get_journal_id(journal_name, ln=CFG_SITE_LANG):
"""
Get the id for this journal from the DB. If DB is down, try to get
from cache.
"""
journal_id = None
from invenio.webjournal_config import InvenioWebJournalJournalIdNotFoundDBError
if CFG_ACCESS_CONTROL_LEVEL_SITE == 2:
# do not connect to the database as the site is closed for
# maintenance:
journal_info_path = get_journal_info_path(journal_name)
try:
journal_info_file = open(journal_info_path, 'r')
journal_info = cPickle.load(journal_info_file)
journal_id = journal_info.get('journal_id', None)
except cPickle.PickleError, e:
journal_id = None
except IOError:
journal_id = None
except ValueError:
journal_id = None
else:
try:
res = run_sql("SELECT id FROM jrnJOURNAL WHERE name=%s",
(journal_name,))
if len(res) > 0:
journal_id = res[0][0]
except OperationalError, e:
# Cannot connect to database. Try to read from cache
journal_info_path = get_journal_info_path(journal_name)
try:
journal_info_file = open(journal_info_path, 'r')
journal_info = cPickle.load(journal_info_file)
journal_id = journal_info['journal_id']
except cPickle.PickleError, e:
journal_id = None
except IOError:
journal_id = None
except ValueError:
journal_id = None
if journal_id is None:
raise InvenioWebJournalJournalIdNotFoundDBError(ln, journal_name)
return journal_id
def guess_journal_name(ln, journal_name=None):
"""
    Tries to guess which journal a user was looking for on the server
    when no journal name is provided, or when the given journal name
    does not match the case of the original journal name.
"""
from invenio.webjournal_config import InvenioWebJournalNoJournalOnServerError
from invenio.webjournal_config import InvenioWebJournalNoNameError
journals_id_and_names = get_journals_ids_and_names()
if len(journals_id_and_names) == 0:
raise InvenioWebJournalNoJournalOnServerError(ln)
elif not journal_name and \
journals_id_and_names[0].has_key('journal_name'):
return journals_id_and_names[0]['journal_name']
elif len(journals_id_and_names) > 0:
possible_journal_names = [journal_id_and_name['journal_name'] for journal_id_and_name \
in journals_id_and_names \
if journal_id_and_name.get('journal_name', '').lower() == journal_name.lower()]
if possible_journal_names:
return possible_journal_names[0]
else:
raise InvenioWebJournalNoNameError(ln)
else:
raise InvenioWebJournalNoNameError(ln)
def get_journals_ids_and_names():
"""
Returns the list of existing journals IDs and names. Try to read
from the DB, or from cache if DB is not accessible.
"""
journals = []
if CFG_ACCESS_CONTROL_LEVEL_SITE == 2:
# do not connect to the database as the site is closed for
# maintenance:
        # os.listdir() returns base names only, so build absolute paths
        # inside the webjournal cache directory before testing them.
        webjournal_cache_dir = "%s/webjournal" % CFG_CACHEDIR
        files = [webjournal_cache_dir + os.sep + filename for \
                 filename in os.listdir(webjournal_cache_dir)]
        info_files = [path + os.sep + 'info.dat' for path in files if \
                      os.path.isdir(path) and \
                      os.path.exists(path + os.sep + 'info.dat')]
for info_file in info_files:
try:
journal_info_file = open(info_file, 'r')
journal_info = cPickle.load(journal_info_file)
journal_id = journal_info.get('journal_id', None)
journal_name = journal_info.get('journal_name', None)
current_issue = journal_info.get('current_issue', None)
if journal_id is not None and \
journal_name is not None:
journals.append({'journal_id': journal_id,
'journal_name': journal_name,
'current_issue': current_issue})
except cPickle.PickleError, e:
# Well, can't do anything...
continue
except IOError:
# Well, can't do anything...
continue
except ValueError:
continue
else:
try:
res = run_sql("SELECT id, name FROM jrnJOURNAL ORDER BY id")
for journal_id, journal_name in res:
journals.append({'journal_id': journal_id,
'journal_name': journal_name})
except OperationalError, e:
# Cannot connect to database. Try to read from cache
            # os.listdir() returns base names only, so build absolute paths
            # inside the webjournal cache directory before testing them.
            webjournal_cache_dir = "%s/webjournal" % CFG_CACHEDIR
            files = [webjournal_cache_dir + os.sep + filename for \
                     filename in os.listdir(webjournal_cache_dir)]
            info_files = [path + os.sep + 'info.dat' for path in files if \
                          os.path.isdir(path) and \
                          os.path.exists(path + os.sep + 'info.dat')]
for info_file in info_files:
try:
journal_info_file = open(info_file, 'r')
journal_info = cPickle.load(journal_info_file)
journal_id = journal_info.get('journal_id', None)
journal_name = journal_info.get('journal_name', None)
current_issue = journal_info.get('current_issue', None)
if journal_id is not None and \
journal_name is not None:
journals.append({'journal_id': journal_id,
'journal_name': journal_name,
'current_issue': current_issue})
except cPickle.PickleError, e:
# Well, can't do anything...
continue
except IOError:
# Well, can't do anything...
continue
except ValueError:
continue
return journals
def parse_url_string(uri):
"""
    Centralized function to parse any URL string given in
    webjournal. Useful to retrieve the current category, journal,
    etc. from within format elements.
    The webjournal interface handler should already have cleaned the
    URI beforehand, so that the journal name exists, the issue number
    is correct, etc. The only remaining problem might be due to the
    capitalization of the journal name in contact, search and popup
    pages, so clean the journal name. Note that the language is also
    as returned from the URL, which might need to be filtered to match
    the available languages (WebJournal elements can rely on bfo.lang
    to retrieve the washed language).
returns:
args: all arguments in dict form
"""
args = {'journal_name' : '',
'issue_year' : '',
'issue_number' : None,
'issue' : None,
'category' : '',
'recid' : -1,
'verbose' : 0,
'ln' : CFG_SITE_LANG,
'archive_year' : None,
'archive_search': ''}
if not uri.startswith('/journal'):
# Mmh, incorrect context. Still, keep language if available
url_params = urlparse(uri)[4]
args['ln'] = dict([part.split('=') for part in url_params.split('&') \
if len(part.split('=')) == 2]).get('ln', CFG_SITE_LANG)
return args
# Take everything after journal and before first question mark
splitted_uri = uri.split('journal', 1)
second_part = splitted_uri[1]
splitted_uri = second_part.split('?')
uri_middle_part = splitted_uri[0]
uri_arguments = ''
if len(splitted_uri) > 1:
uri_arguments = splitted_uri[1]
arg_list = uri_arguments.split("&")
args['ln'] = CFG_SITE_LANG
args['verbose'] = 0
for arg_pair in arg_list:
arg_and_value = arg_pair.split('=')
if len(arg_and_value) == 2:
if arg_and_value[0] == 'ln':
args['ln'] = arg_and_value[1]
elif arg_and_value[0] == 'verbose' and \
arg_and_value[1].isdigit():
args['verbose'] = int(arg_and_value[1])
elif arg_and_value[0] == 'archive_year' and \
arg_and_value[1].isdigit():
args['archive_year'] = int(arg_and_value[1])
elif arg_and_value[0] == 'archive_search':
args['archive_search'] = arg_and_value[1]
elif arg_and_value[0] == 'name':
args['journal_name'] = guess_journal_name(args['ln'],
arg_and_value[1])
arg_list = uri_middle_part.split("/")
if len(arg_list) > 1 and arg_list[1] not in ['search', 'contact', 'popup']:
args['journal_name'] = urllib.unquote(arg_list[1])
elif arg_list[1] not in ['search', 'contact', 'popup']:
args['journal_name'] = guess_journal_name(args['ln'],
args['journal_name'])
cur_issue = get_current_issue(args['ln'], args['journal_name'])
if len(arg_list) > 2:
try:
args['issue_year'] = int(urllib.unquote(arg_list[2]))
except:
args['issue_year'] = int(cur_issue.split('/')[1])
else:
args['issue'] = cur_issue
args['issue_year'] = int(cur_issue.split('/')[1])
args['issue_number'] = int(cur_issue.split('/')[0])
if len(arg_list) > 3:
try:
args['issue_number'] = int(urllib.unquote(arg_list[3]))
except:
args['issue_number'] = int(cur_issue.split('/')[0])
args['issue'] = make_issue_number(args['journal_name'],
args['issue_number'],
args['issue_year'])
if len(arg_list) > 4:
args['category'] = urllib.unquote(arg_list[4])
if len(arg_list) > 5:
try:
args['recid'] = int(urllib.unquote(arg_list[5]))
except:
pass
args['ln'] = get_journal_preferred_language(args['journal_name'],
args['ln'])
# FIXME : wash arguments?
return args
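# Illustrative example (hypothetical journal and category names): a URI
# such as
#   /journal/AtlantisTimes/2009/03/News/25?ln=en
# would roughly be parsed into
#   {'journal_name': 'AtlantisTimes', 'issue_year': 2009,
#    'issue_number': 3, 'issue': '03/2009', 'category': 'News',
#    'recid': 25, 'ln': 'en', 'verbose': 0, ...}
# (the exact 'issue' padding and the final 'ln' depend on the journal
# configuration).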
def make_journal_url(current_uri, custom_parameters=None):
"""
Create a URL, using the current URI and overriding values
with the given custom_parameters
Parameters:
current_uri - *str* the current full URI
custom_parameters - *dict* a dictionary of parameters that
should override those of curent_uri
"""
if not custom_parameters:
custom_parameters = {}
default_params = parse_url_string(current_uri)
for key, value in custom_parameters.iteritems():
# Override default params with custom params
default_params[key] = str(value)
uri = CFG_SITE_URL + '/journal/'
if default_params['journal_name']:
uri += urllib.quote(default_params['journal_name']) + '/'
if default_params['issue_year'] and default_params['issue_number']:
uri += make_issue_number(default_params['journal_name'],
default_params['issue_number'],
default_params['issue_year'],
for_url_p=True) + '/'
if default_params['category']:
uri += urllib.quote(default_params['category'])
if default_params['recid'] and \
default_params['recid'] != -1:
uri += '/' + str(default_params['recid'])
printed_question_mark = False
if default_params['ln']:
uri += '?ln=' + default_params['ln']
printed_question_mark = True
if default_params['verbose'] != 0:
if printed_question_mark:
uri += '&verbose=' + str(default_params['verbose'])
else:
uri += '?verbose=' + str(default_params['verbose'])
return uri
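# Illustrative example (hypothetical values): overriding the record id
# of the current URI,
#   make_journal_url('/journal/AtlantisTimes/2009/03/News?ln=en',
#                    {'recid': 25})
# would return CFG_SITE_URL + '/journal/AtlantisTimes/2009/03/News/25?ln=en'.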
############################ HTML CACHING FUNCTIONS ############################
def cache_index_page(html, journal_name, category, issue, ln):
"""
Caches the index page main area of a Bulletin
(right hand menu cannot be cached)
"""
issue = issue.replace("/", "_")
category = category.replace(" ", "")
cache_path = os.path.abspath('%s/webjournal/%s/%s_index_%s_%s.html' % \
(CFG_CACHEDIR, journal_name,
issue, category,
ln))
if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'):
# Mmh, not accessing correct path. Stop caching
return False
cache_path_dir = '%s/webjournal/%s' % (CFG_CACHEDIR, journal_name)
if not os.path.isdir(cache_path_dir):
os.makedirs(cache_path_dir)
cached_file = open(cache_path, "w")
cached_file.write(html)
cached_file.close()
def get_index_page_from_cache(journal_name, category, issue, ln):
"""
Function to get an index page from the cache.
False if not in cache.
"""
issue = issue.replace("/", "_")
category = category.replace(" ", "")
cache_path = os.path.abspath('%s/webjournal/%s/%s_index_%s_%s.html' % \
(CFG_CACHEDIR, journal_name,
issue, category, ln))
if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'):
# Mmh, not accessing correct path. Stop reading cache
return False
try:
cached_file = open(cache_path).read()
except:
return False
return cached_file
def cache_article_page(html, journal_name, category, recid, issue, ln):
"""
Caches an article view of a journal.
"""
issue = issue.replace("/", "_")
category = category.replace(" ", "")
cache_path = os.path.abspath('%s/webjournal/%s/%s_article_%s_%s_%s.html' % \
(CFG_CACHEDIR, journal_name,
issue, category, recid, ln))
if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'):
# Mmh, not accessing correct path. Stop caching
return
cache_path_dir = '%s/webjournal/%s' % (CFG_CACHEDIR, journal_name)
if not os.path.isdir(cache_path_dir):
os.makedirs(cache_path_dir)
cached_file = open(cache_path, "w")
cached_file.write(html)
cached_file.close()
def get_article_page_from_cache(journal_name, category, recid, issue, ln):
"""
Gets an article view of a journal from cache.
False if not in cache.
"""
issue = issue.replace("/", "_")
category = category.replace(" ", "")
cache_path = os.path.abspath('%s/webjournal/%s/%s_article_%s_%s_%s.html' % \
(CFG_CACHEDIR, journal_name,
issue, category, recid, ln))
if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'):
# Mmh, not accessing correct path. Stop reading cache
return False
try:
cached_file = open(cache_path).read()
except:
return False
return cached_file
def clear_cache_for_article(journal_name, category, recid, issue):
"""
Resets the cache for an article (e.g. after an article has been
modified)
"""
issue = issue.replace("/", "_")
category = category.replace(" ", "")
cache_path = os.path.abspath('%s/webjournal/%s/' %
(CFG_CACHEDIR, journal_name))
if not cache_path.startswith(CFG_CACHEDIR + '/webjournal'):
# Mmh, not accessing correct path. Stop deleting cache
return False
# try to delete the article cached file
try:
os.remove('%s/webjournal/%s/%s_article_%s_%s_en.html' %
(CFG_CACHEDIR, journal_name, issue, category, recid))
except:
pass
try:
os.remove('%s/webjournal/%s/%s_article_%s_%s_fr.html' %
(CFG_CACHEDIR, journal_name, issue, category, recid))
except:
pass
# delete the index page for the category
try:
os.remove('%s/webjournal/%s/%s_index_%s_en.html'
% (CFG_CACHEDIR, journal_name, issue, category))
except:
pass
try:
os.remove('%s/webjournal/%s/%s_index_%s_fr.html'
% (CFG_CACHEDIR, journal_name, issue, category))
except:
pass
try:
path = get_journal_article_cache_path(journal_name, issue)
os.remove(path)
except:
pass
return True
def clear_cache_for_issue(journal_name, issue):
"""
clears the cache of a whole issue.
"""
issue = issue.replace("/", "_")
cache_path_dir = os.path.abspath('%s/webjournal/%s' % \
(CFG_CACHEDIR, journal_name))
if not cache_path_dir.startswith(CFG_CACHEDIR + '/webjournal'):
# Mmh, not accessing correct path. Stop deleting cache
return False
all_cached_files = os.listdir(cache_path_dir)
non_deleted = []
for cached_file in all_cached_files:
if cached_file.startswith(issue.replace('/', '_')):
try:
os.remove(cache_path_dir + '/' + cached_file)
except:
return False
else:
non_deleted.append(cached_file)
return True
######################### CERN SPECIFIC FUNCTIONS #################
def get_recid_from_legacy_number(issue_number, category, number):
"""
Returns the recid based on the issue number, category and
'number'.
This is used to support URLs using the now deprecated 'number'
argument. The function tries to reproduce the behaviour of the
    old way of doing things, even keeping some of its 'problems' (so
    that we reach the same article as before with a given number).
    Returns the recid as int, or -1 if not found.
"""
recids = []
if issue_number[0] == "0":
alternative_issue_number = issue_number[1:]
recids = list(search_pattern(p='65017a:"%s" and 773__n:%s' %
(category, issue_number)))
recids.extend(list(search_pattern(p='65017a:"%s" and 773__n:%s' %
(category, alternative_issue_number))))
else:
recids = list(search_pattern(p='65017:"%s" and 773__n:%s' %
(category, issue_number)))
# Now must order the records and pick the one at index 'number'.
# But we have to take into account that there can be multiple
# records at position 1, and that these additional records should
# be numbered with negative numbers:
# 1, 1, 1, 2, 3 -> 1, -1, -2, 2, 3...
negative_index_records = {}
positive_index_records = {}
# Fill in 'negative_index_records' and 'positive_index_records'
# lists with the following loop
for recid in recids:
bfo = BibFormatObject(recid)
order = [subfield['c'] for subfield in bfo.fields('773__') if \
issue_number in subfield.get('n', '')]
if len(order) > 0:
# If several orders are defined for the same article and
# the same issue, keep the first one
order = order[0]
if order.isdigit():
# Order must be an int. Otherwise skip
order = int(order)
if order == 1 and positive_index_records.has_key(1):
# This is then a negative number for this record
index = (len(negative_index_records.keys()) > 0 and \
min(negative_index_records.keys()) -1) or 0
negative_index_records[index] = recid
else:
# Positive number for this record
if not positive_index_records.has_key(order):
positive_index_records[order] = recid
else:
# We make the assumption that we cannot have
# twice the same position for two
# articles. Previous WebJournal module was not
# clear about that. Just drop this record
# (better than crashing or looping forever..)
pass
recid_to_return = -1
# Ok, we can finally pick the recid corresponding to 'number'
if number <= 0:
negative_indexes = negative_index_records.keys()
negative_indexes.sort()
negative_indexes.reverse()
if len(negative_indexes) > abs(number):
recid_to_return = negative_index_records[negative_indexes[abs(number)]]
else:
if positive_index_records.has_key(number):
recid_to_return = positive_index_records[number]
return recid_to_return
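# Example of the 'number' -> recid mapping implemented above
# (hypothetical ordering): with articles at positions 1, 1, 1, 2, 3,
# number=1 returns the first record found at position 1, number=0 the
# second one, number=-1 the third one, and number=2 / number=3 the
# remaining records.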
def is_recid_in_released_issue(recid):
"""
    Returns True if recid is part of a released issue of its journal
    (or of an unreleased issue that the journal configuration allows
    to be shown).
    WARNING: the function does not check whether the article still
    belongs to the draft collection of the record. This is intentional,
    in order to work around the time needed for a record to go from
    the draft collection to the final collection.
"""
bfo = BibFormatObject(recid)
journal_name = ''
journal_names = [journal_name for journal_name in bfo.fields('773__t') if journal_name]
if journal_names:
journal_name = journal_names[0]
else:
return False
existing_journal_names = [o['journal_name'] for o in get_journals_ids_and_names()]
if not journal_name in existing_journal_names:
# Try to remove whitespace
journal_name = journal_name.replace(' ', '')
if not journal_name in existing_journal_names:
# Journal name unknown from WebJournal
return False
config_strings = get_xml_from_config(["draft_image_access_policy"], journal_name)
if config_strings['draft_image_access_policy'] and \
config_strings['draft_image_access_policy'][0] != 'allow':
# The journal does not want to optimize access to images
return False
article_issues = bfo.fields('773__n')
current_issue = get_current_issue(CFG_SITE_LANG, journal_name)
for article_issue in article_issues:
# Check each issue until a released one is found
if get_release_datetime(article_issue, journal_name):
# Release date exists, issue has been released
return True
else:
# Unreleased issue. Do we still allow based on journal config?
unreleased_issues_mode = get_unreleased_issue_hiding_mode(journal_name)
if (unreleased_issues_mode == 'none' or \
(unreleased_issues_mode == 'future' and \
not issue_is_later_than(article_issue, current_issue))):
return True
return False
| fjorba/invenio | modules/webjournal/lib/webjournal_utils.py | Python | gpl-2.0 | 67,476 |
# Generated by Django 2.1.4 on 2020-01-31 16:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plugininstances', '0009_auto_20190715_1706'),
]
operations = [
migrations.AlterField(
model_name='pathparameter',
name='value',
field=models.CharField(max_length=200),
),
]
| FNNDSC/ChRIS_ultron_backEnd | chris_backend/plugininstances/migrations/0010_auto_20200131_1619.py | Python | mit | 401 |
"""Contains functions to annotate macromolecular entities."""
from cogent.core.entity import HIERARCHY
__author__ = "Marcin Cieslik"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Marcin Cieslik"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Marcin Cieslik"
__email__ = "mpc4p@virginia.edu"
__status__ = "Development"
def xtradata(data, entity):
"""Annotates an entity with data from a ``{full_id:data}`` dictionary. The
``data`` should also be a dictionary.
Arguments:
- data: a dictionary, which is a mapping of full_id's (keys) and data
dictionaries.
- entity: top-level entity, which contains the entities which will hold
the data."""
for full_id, data in data.iteritems():
sub_entity = entity
strip_full_id = [i for i in full_id if i is not None]
for short_id in strip_full_id:
sub_entity = sub_entity[(short_id,)]
sub_entity.xtra.update(data)
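# Usage sketch (hypothetical ids and data): each key of 'data' is a
# full_id tuple leading from the given top-level entity down to a
# sub-entity, e.g.
#   xtradata({full_id_of_some_residue: {'sasa': 12.3}}, structure)
# would update that residue's .xtra dictionary with {'sasa': 12.3}.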
| sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/struct/annotation.py | Python | mit | 1,011 |
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| Schakkall/Votapar | project/project/urls.py | Python | gpl-3.0 | 764 |
#!/usr/bin/env python3
# Copyright (c) 2016-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate COINBASE_MATURITY (CB) more blocks to ensure the coinbases are mature.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in block CB + 3.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on block CB + 4.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on block CB + 5.
"""
import time
from test_framework.blocktools import (
COINBASE_MATURITY,
NORMAL_GBT_REQUEST_PARAMS,
add_witness_commitment,
create_block,
create_transaction,
)
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if len(newscript) == 0:
assert len(i) == 0
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
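# Note: the helper above replaces the first (empty) scriptSig stack
# element -- the dummy argument consumed by CHECKMULTISIG, which the
# NULLDUMMY rule requires to be null -- with b'\x51' (OP_1), turning a
# compliant spend into a non-NULLDUMMY one that must be rejected once
# the rule is enforced.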
class NULLDUMMYTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
self.extra_args = [[
f'-testactivationheight=segwit@{COINBASE_MATURITY + 5}',
'-addresstype=legacy',
'-par=1', # Use only one script thread to get the exact reject reason for testing
]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[0].get_wallet_rpc('wmulti')
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
self.address = w0.getnewaddress()
self.pubkey = w0.getaddressinfo(self.address)['pubkey']
self.ms_address = wmulti.addmultisigaddress(1, [self.pubkey])['address']
self.wit_address = w0.getnewaddress(address_type='p2sh-segwit')
self.wit_ms_address = wmulti.addmultisigaddress(1, [self.pubkey], '', 'p2sh-segwit')['address']
if not self.options.descriptors:
# Legacy wallets need to import these so that they are watched by the wallet. This is unnecessary (and does not need to be tested) for descriptor wallets
wmulti.importaddress(self.ms_address)
wmulti.importaddress(self.wit_ms_address)
self.coinbase_blocks = self.generate(self.nodes[0], 2) # block height = 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.generate(self.nodes[0], COINBASE_MATURITY) # block height = COINBASE_MATURITY + 2
self.lastblockhash = self.nodes[0].getbestblockhash()
self.lastblockheight = COINBASE_MATURITY + 2
self.lastblocktime = int(time.time()) + self.lastblockheight
self.log.info(f"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [{COINBASE_MATURITY + 3}]")
test1txs = [create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=49)]
txid1 = self.nodes[0].sendrawtransaction(test1txs[0].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], txid1, self.ms_address, amount=48))
txid2 = self.nodes[0].sendrawtransaction(test1txs[1].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
txid3 = self.nodes[0].sendrawtransaction(test1txs[2].serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test1txs, accept=True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
trueDummy(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)
self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]")
self.block_submit(self.nodes[0], [test2tx], accept=True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
test6txs = [CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test4tx], accept=False)
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test5tx], with_witness=True, accept=False)
self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]")
for i in test6txs:
self.nodes[0].sendrawtransaction(i.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test6txs, with_witness=True, accept=True)
def block_submit(self, node, txs, *, with_witness=False, accept):
tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
assert_equal(tmpl['previousblockhash'], self.lastblockhash)
assert_equal(tmpl['height'], self.lastblockheight + 1)
block = create_block(tmpl=tmpl, ntime=self.lastblocktime + 1)
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
if with_witness:
add_witness_commitment(block)
block.solve()
assert_equal(None if accept else NULLDUMMY_ERROR, node.submitblock(block.serialize().hex()))
if accept:
assert_equal(node.getbestblockhash(), block.hash)
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| prusnak/bitcoin | test/functional/feature_nulldummy.py | Python | mit | 7,358 |
import dbus
from servicetest import assertEquals, assertNotEquals, call_async, EventPattern
from gabbletest import exec_test, acknowledge_iq, make_muc_presence
import constants as cs
from twisted.words.xish import xpath
import ns
from mucutil import join_muc_and_check
def test(q, bus, conn, stream, access_control):
iq_event = q.expect('stream-iq', to=None, query_ns='vcard-temp',
query_name='vCard')
acknowledge_iq(stream, iq_event.stanza)
muc = 'chat@conf.localhost'
_, _, test_handle, bob_handle = \
join_muc_and_check(q, bus, conn, stream, muc)
# Bob offers a stream tube
bob_bus_name = ':2.Ym9i'
presence = make_muc_presence('owner', 'moderator', 'chat@conf.localhost', 'bob')
tubes = presence.addElement((ns.TUBES, 'tubes'))
tube = tubes.addElement((None, 'tube'))
tube['type'] = 'dbus'
tube['initiator'] = 'chat@conf.localhost/bob'
tube['stream-id'] = '10'
tube['id'] = '1'
tube['service'] = 'com.example.Test'
tube['dbus-name'] = bob_bus_name
parameters = tube.addElement((None, 'parameters'))
parameter = parameters.addElement((None, 'parameter'))
parameter['type'] = 'str'
parameter['name'] = 'foo'
parameter.addContent('bar')
stream.send(presence)
# tubes channel is created
event = q.expect('dbus-signal', signal='NewChannels')
channels = event.args[0]
path, props = channels[0]
# tube channel is created
event = q.expect('dbus-signal', signal='NewChannels')
channels = event.args[0]
path, props = channels[0]
assertEquals(cs.CHANNEL_TYPE_DBUS_TUBE, props[cs.CHANNEL_TYPE])
assertEquals('chat@conf.localhost/bob', props[cs.INITIATOR_ID])
bob_handle = props[cs.INITIATOR_HANDLE]
assertEquals([cs.CHANNEL_IFACE_GROUP, cs.CHANNEL_IFACE_TUBE],
props[cs.INTERFACES])
assertEquals(False, props[cs.REQUESTED])
assertEquals('chat@conf.localhost', props[cs.TARGET_ID])
assertEquals('com.example.Test', props[cs.DBUS_TUBE_SERVICE_NAME])
assertEquals({'foo': 'bar'}, props[cs.TUBE_PARAMETERS])
assertEquals([cs.SOCKET_ACCESS_CONTROL_CREDENTIALS,
cs.SOCKET_ACCESS_CONTROL_LOCALHOST],
props[cs.DBUS_TUBE_SUPPORTED_ACCESS_CONTROLS])
tube_chan = bus.get_object(conn.bus_name, path)
tube_iface = dbus.Interface(tube_chan, cs.CHANNEL_IFACE_TUBE)
dbus_tube_iface = dbus.Interface(tube_chan, cs.CHANNEL_TYPE_DBUS_TUBE)
tube_chan_iface = dbus.Interface(tube_chan, cs.CHANNEL)
# only Bob is in DBusNames
dbus_names = tube_chan.Get(cs.CHANNEL_TYPE_DBUS_TUBE, 'DBusNames', dbus_interface=cs.PROPERTIES_IFACE)
assertEquals({bob_handle: bob_bus_name}, dbus_names)
call_async(q, dbus_tube_iface, 'Accept', access_control)
return_event, names_changed1, names_changed2, presence_event = q.expect_many(
EventPattern('dbus-return', method='Accept'),
EventPattern('dbus-signal', signal='DBusNamesChanged', interface=cs.CHANNEL_TYPE_DBUS_TUBE),
EventPattern('dbus-signal', signal='DBusNamesChanged', interface=cs.CHANNEL_TYPE_DBUS_TUBE),
EventPattern('stream-presence', to='chat@conf.localhost/test'))
tube_addr = return_event.value[0]
assert len(tube_addr) > 0
# check presence stanza
tube_node = xpath.queryForNodes('/presence/tubes/tube', presence_event.stanza)[0]
assertEquals('chat@conf.localhost/bob', tube_node['initiator'])
assertEquals('com.example.Test', tube_node['service'])
assertEquals('10', tube_node['stream-id'])
assertEquals('dbus', tube_node['type'])
assertEquals('1', tube_node['id'])
self_bus_name = tube_node['dbus-name']
tubes_self_handle = tube_chan.GetSelfHandle(dbus_interface=cs.CHANNEL_IFACE_GROUP)
assertNotEquals(0, tubes_self_handle)
# both of us are in DBusNames now
dbus_names = tube_chan.Get(cs.CHANNEL_TYPE_DBUS_TUBE, 'DBusNames', dbus_interface=cs.PROPERTIES_IFACE)
assertEquals({bob_handle: bob_bus_name, tubes_self_handle: self_bus_name}, dbus_names)
added, removed = names_changed1.args
assertEquals({bob_handle: bob_bus_name}, added)
assertEquals([], removed)
added, removed = names_changed2.args
assertEquals({tubes_self_handle: self_bus_name}, added)
assertEquals([], removed)
tube_chan_iface.Close()
q.expect_many(
EventPattern('dbus-signal', signal='Closed'),
EventPattern('dbus-signal', signal='ChannelClosed'))
if __name__ == '__main__':
# We can't use t.exec_dbus_tube_test() as we can use only the muc bytestream
exec_test(lambda q, bus, conn, stream:
test(q, bus, conn, stream, cs.SOCKET_ACCESS_CONTROL_CREDENTIALS))
exec_test(lambda q, bus, conn, stream:
test(q, bus, conn, stream, cs.SOCKET_ACCESS_CONTROL_LOCALHOST))
| mlundblad/telepathy-gabble | tests/twisted/tubes/accept-muc-dbus-tube.py | Python | lgpl-2.1 | 4,784 |
from __future__ import unicode_literals
from django.utils import timezone
from django.core.management.base import NoArgsCommand
from sms_operator.sender import sender
class Command(NoArgsCommand):
def handle_noargs(self, **options):
checked_messages_qs = sender.update_status()
self.stdout.write('Update status of %s sms messages' % checked_messages_qs.count())
| matllubos/django-sms-operator | sms_operator/management/commands/update_sms_status.py | Python | lgpl-3.0 | 387 |
# -*- coding: utf-8 -*-
# Copyright (C) 2005 Osmo Salomaa
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import aeidon
import gaupol
from gi.repository import Gtk
from unittest.mock import patch
OK = lambda *args: Gtk.ResponseType.OK
CANCEL = lambda *args: Gtk.ResponseType.CANCEL
class TestToolsAgent(gaupol.TestCase):
def setup_method(self, method):
self.application = self.new_application()
@patch("gaupol.util.flash_dialog", CANCEL)
def test__on_adjust_durations_activate(self):
self.application.get_action("adjust-durations").activate()
@patch("gaupol.util.flash_dialog", OK)
def test__on_check_spelling_activate(self):
language = self.get_spell_check_language("en")
gaupol.conf.spell_check.language = language
self.application.get_action("check-spelling").activate()
@patch("gaupol.util.flash_dialog", OK)
def test__on_configure_spell_check_activate(self):
self.application.get_action("configure-spell-check").activate()
@patch("gaupol.util.flash_dialog", CANCEL)
def test__on_convert_framerate_activate(self):
self.application.get_action("convert-framerate").activate()
@patch("gaupol.TextAssistant.show", lambda *args: None)
def test__on_correct_texts_activate(self):
self.application.get_action("correct-texts").activate()
@patch("gaupol.util.flash_dialog", CANCEL)
def test__on_shift_positions_activate__frame(self):
page = self.application.get_current_page()
page.edit_mode = aeidon.modes.FRAME
self.application.get_action("shift-positions").activate()
@patch("gaupol.util.flash_dialog", CANCEL)
def test__on_shift_positions_activate__time(self):
page = self.application.get_current_page()
page.edit_mode = aeidon.modes.TIME
self.application.get_action("shift-positions").activate()
@patch("gaupol.util.flash_dialog", CANCEL)
def test__on_transform_positions_activate__frame(self):
page = self.application.get_current_page()
page.edit_mode = aeidon.modes.FRAME
self.application.get_action("transform-positions").activate()
@patch("gaupol.util.flash_dialog", CANCEL)
def test__on_transform_positions_activate__time(self):
page = self.application.get_current_page()
page.edit_mode = aeidon.modes.TIME
self.application.get_action("transform-positions").activate()
| otsaloma/gaupol | gaupol/agents/test/test_tools.py | Python | gpl-3.0 | 3,011 |
#!/usr/bin/env python2
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Gris Ge <fge@redhat.com>
import re
import urllib2
import urllib
from cookielib import CookieJar
_HTTP_STATUS_OK = 200
_HTTP_STATUS_UNAUTHORIZED = 401
class ObsError(Exception):
"""
Error raised by Obs class.
The ObsError object has two properties:
code (integer):
ObsError.LIB_BUG
Please report a bug to us.
ObsError.BAD_AUTH
Invalid username or password.
ObsError.EMPTY_PROJECT
Provided project has no package or repository configured.
message (string):
Human friendly error message.
"""
LIB_BUG = 1
BAD_AUTH = 2
EMPTY_PROJECT = 3
def __init__(self, code, message, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.code = code
self.message = message
def _handle_http_errors(method):
"""
Convert urllib2.HTTPError to ObsError
"""
def wrapper(*args, **kargs):
try:
return method(*args, **kargs)
except urllib2.HTTPError as http_err:
http_err_code = http_err.getcode()
if http_err_code == _HTTP_STATUS_UNAUTHORIZED:
raise ObsError(
ObsError.BAD_AUTH, "Invalid username and password")
raise ObsError(
ObsError.LIB_BUG,
"URL %s got http error %d: %s" %
(http_err.geturl(), http_err_code, str(http_err)))
return wrapper
def _parse_build_status(out_str):
"""
Due to XML vulnerabilities of python modules,
https://docs.python.org/2/library/xml.html#xml-vulnerabilities
use regex instead.
Return a list in this data layout:
[
{
project = "home:cathay4t:misc_fedora",
repository = "Fedora_20",
arch = "i586",
code = "published",
state = "published",
statuscount = {
succeeded = 1,
failed = 0,
}
}
]
"""
rc_list = list()
tmp_dict = dict()
regex_status_count = re.compile(
'statuscount code="(?P<code>[a-z]+)" count="(?P<count>[0-9]+)"')
for line in out_str.split("\n"):
line = line.lstrip()
if line.startswith("<result "):
# Remove heading and tailing < >
line = line[1:-1]
for key_value in line.split(" "):
tmp_l = key_value.split("=")
if len(tmp_l) == 2:
value = tmp_l[1]
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
tmp_dict[tmp_l[0]] = value
tmp_dict["statuscount"] = {}
elif line.startswith("<statuscount "):
regex_result = regex_status_count.search(line)
if regex_result:
tmp_dict["statuscount"][regex_result.group('code')] = \
regex_result.group('count')
elif line == "</result>":
if len(tmp_dict) != 0:
rc_list.append(tmp_dict)
tmp_dict = dict()
return rc_list
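# Simplified sample of the XML this parser expects from
# GET /build/<project>/_result?view=summary (exact attributes and
# nesting may differ slightly on a real server):
#
#   <resultlist>
#     <result project="home:user:proj" repository="Fedora_20" arch="i586"
#             code="published" state="published">
#       <summary>
#         <statuscount code="succeeded" count="1"/>
#         <statuscount code="failed" count="0"/>
#       </summary>
#     </result>
#   </resultlist>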
class Obs(object):
"""
Library for OpenSuSE Build Service build management using
OBS REST API at open-build-service project '/docs/api/api/api.txt'.
"""
API_URL = "https://api.opensuse.org"
STATUS_OK = 0
STATUS_FAILED = 1
STATUS_BUILDING = 2
_BUILD_STATUS_FAILED_LIST = ['failed', 'unresolvable', 'broken']
_BUILD_STATUS_BUILDING_LIST = ['scheduled', 'building', 'dispatching',
'blocked', 'signing', 'finished',
'unknown']
def __init__(self, username, password, project, apiurl=None):
"""
        Define apiurl if you are not using the public openSuSE build
        service (https://build.opensuse.org).
"""
self.username = username
self.password = password
self.project = project
if apiurl is None:
self.apiurl = Obs.API_URL
else:
self.apiurl = apiurl
cookie_jar = CookieJar()
pass_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
pass_mgr.add_password(None, self.apiurl, username, password)
self._opener = urllib2.build_opener(
urllib2.HTTPCookieProcessor(cookie_jar),
urllib2.HTTPBasicAuthHandler(pass_mgr))
@_handle_http_errors
def service_remoterun(self, package):
"""
Usage:
Trigger a re-run on the server side via:
POST /source/<project_name>/<package_name>?cmd=runservice
Post with empty data.
Args:
package (string)
The package name to invoke "service remoterun".
Return:
void
Raise exception when error.
"""
url = "%s/source/%s/%s?cmd=runservice" % (self.apiurl, self.project,
package)
rc_obj = self._opener.open(url, data="")
http_code = rc_obj.getcode()
if http_code != _HTTP_STATUS_OK:
raise ObsError(
http_code,
"service_remoterun() failed with http error "
"code %d, message %s" % (http_code, rc_obj.read()))
@_handle_http_errors
def project_status(self):
"""
Usage:
Check overall status of this project via:
GET /build/<project_name>/_result?view=summary
Args:
void
Return:
(status, reason)
status (integer)
Obs.STATUS_OK
Build finished and repo is ready.
Obs.STATUS_FAILED
At least one package is failed.
Obs.STATUS_BUILDING
At least one package is still waiting, building or
signing or publishing repo. No package has any
failure yet. This is an intermediate state, it will
lead to Obs.STATUS_OK or Obs.STATUS_FAILED.
reason (string)
Human friendly message when not Obs.STATUS_OK.
Example:
Fedora_21 x86_64 has 2 packages in building state.
"""
url = "%s/build/%s/_result?view=summary" % (self.apiurl, self.project)
out_str = self._opener.open(url).read()
status = Obs.STATUS_OK
reason = ""
bs_list = []
if not out_str:
raise ObsError(
ObsError.LIB_BUG,
"project_status(): Got empty string return from %s" % url)
bs_list = _parse_build_status(out_str)
if len(bs_list) == 0:
raise ObsError(
ObsError.EMPTY_PROJECT,
"Provided project has not repository or package")
for bs in bs_list:
if bs['state'] != 'published' and bs['state'] != 'building':
status = Obs.STATUS_BUILDING
reason += "%s %s repo is publishing. " % (
bs['repository'], bs['arch'])
sc = bs['statuscount']
for building_key in Obs._BUILD_STATUS_BUILDING_LIST:
if building_key in sc.keys():
status = Obs.STATUS_BUILDING
reason += "%s %s has %s packages in %s state. " % \
(bs['repository'], bs['arch'],
sc[building_key], building_key)
for bs in bs_list:
sc = bs['statuscount']
for failed_key in Obs._BUILD_STATUS_FAILED_LIST:
if failed_key in sc.keys():
                    status = Obs.STATUS_FAILED
reason += "%s %s has %s packages in %s state. " % \
(bs['repository'], bs['arch'],
sc[failed_key], failed_key)
reason = reason.rstrip()
return (status, reason)
def file_upload(self, package, file_name, file_content, commit_comment):
"""
Usage:
Create/override certain file via:
PUT /source/<project_name>/<package_name>/<filename>
Args:
package (string)
The package name.
file_name (string)
                The file name to create or override.
file_content (string)
The file content.
commit_comment (string)
Comment string for osc commit.
Return:
void
Raise exception when error.
"""
url = "%s/source/%s/%s/%s?%s" % (
self.apiurl, self.project, package, file_name,
urllib.urlencode({'comment': commit_comment}))
request = urllib2.Request(url, data=file_content)
request.get_method = lambda: 'PUT'
rc_obj = self._opener.open(request)
http_code = rc_obj.getcode()
if http_code != _HTTP_STATUS_OK:
raise ObsError(
http_code,
"file_upload() failed with http error "
"code %d, message %s" % (http_code, rc_obj.read()))
| cathay4t/python-libobs | libobs.py | Python | gpl-3.0 | 10,027 |
# Generated by Django 2.1.7 on 2019-04-11 06:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0014_auto_20190120_1049'),
]
operations = [
migrations.AlterField(
model_name='chapter',
name='image',
field=models.URLField(blank=True, verbose_name='image'),
),
migrations.AlterField(
model_name='imagepreview',
name='image',
field=models.URLField(blank=True, verbose_name='image'),
),
migrations.AlterField(
model_name='imagepreview',
name='image_en',
field=models.URLField(blank=True, null=True, verbose_name='image'),
),
migrations.AlterField(
model_name='imagepreview',
name='image_it',
field=models.URLField(blank=True, null=True, verbose_name='image'),
),
migrations.AlterField(
model_name='product',
name='image',
field=models.URLField(blank=True, verbose_name='image'),
),
]
| flavoi/diventi | diventi/products/migrations/0015_auto_20190411_0806.py | Python | apache-2.0 | 1,132 |
#!/usr/bin/python
#
# Copyright (C) 2012 Michael Spreitzenbarth, Sven Schmitt
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, string, time, subprocess, sys
import _xmlParser, _adel_log, _sqliteParser
from _processXmlConfig import PhoneConfig
def string_replace(byte_string):
    # Decode a UTF-8 byte string read from the SQLite database into unicode.
    output = byte_string.decode("utf-8")
    return output
def string_replace_2(unicode_string):
    # Encode a unicode string back into a UTF-8 byte string.
    output = unicode_string.encode("utf-8")
    return output
def analyze_call_logs(backup_dir, os_version, xml_dir, config):
######################################################################################################################################################
# definition of Tuples #
######################################################################################################################################################
# CALL LOGS
## Database Name
db_file_call_logs = backup_dir + "/" + config.db_file_call_logs
######################################################################################################################################################
## Table Name
#### calls
## corresponding table number in the database
CALL_LOG_TABLE_NUMBER = config.call_log_table_num
## Table Design
#### [[['_id', 'INTEGER'], ['number', 'TEXT'], ['presentation', 'INTEGER'], ['date', 'INTEGER'], ['duration', 'INTEGER'], ['type', 'INTEGER'],
# ['new', 'INTEGER'], ['name', 'TEXT'], ['numbertype', 'INTEGER'], ['numberlabel', 'TEXT'], ['countryiso', 'TEXT'], ['voicemail_uri', 'TEXT'],
# ['is_read', 'INTEGER'], ['geocoded_location', 'TEXT'], ['lookup_uri', 'TEXT'], ['matched_number', 'TEXT'], ['normalized_number', 'TEXT'],
# ['photo_id', 'INTEGER'], ['formatted_number', 'TEXT'], ['_data', 'TEXT'], ['has_content', 'INTEGER'], ['mime_type', 'TEXT'],
# ['source_data', 'TEXT'], ['source_package', 'TEXT'], ['state', 'INTEGER']]
## Tuple Definition -> the integers in this tuple represent the
## corresponding columns of the database table of the above table design
CALL_LOG_ENTRIES_LIST = config.call_log_entry_positions
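    # Illustrative mapping (hypothetical values): with the table design shown
    # above, config.call_log_entry_positions could be e.g. [0, 1, 3, 4, 5, 7],
    # so a row is read as
    #   row = result_list[CALL_LOG_TABLE_NUMBER][i]
    #   row[CALL_LOG_ENTRIES_LIST[1]]  -> 'number' column
    #   row[CALL_LOG_ENTRIES_LIST[2]]  -> 'date' column (milliseconds since epoch)
    # i.e. the tuple only maps logical fields to physical column positions.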
######################################################################################################################################################
if os.path.isfile(db_file_call_logs):
_adel_log.log("parseDBs: ----> starting to parse \033[0;32mcall logs\033[m", 0)
call_log_list = []
try:
result_list = _sqliteParser.parse_db(db_file_call_logs)
for i in range(1, len(result_list[CALL_LOG_TABLE_NUMBER])):
# call_log_list = [[id, number, date, duration, type, name],[......],......]
if os_version == "442":
call_log_list.append([
str(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[0]]),
str(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[1]]),
time.strftime("%a, %d %B %Y %H:%M:%S",time.gmtime(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[2]] / 1000.0)),
str(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[3]]),
str(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[4]]),
string_replace(str(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[6]]))])
else:
call_log_list.append([
str(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[0]]),
str(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[1]]),
time.strftime("%a, %d %B %Y %H:%M:%S",time.gmtime(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[2]] / 1000.0)),
str(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[3]]),
str(result_list[CALL_LOG_TABLE_NUMBER][i][CALL_LOG_ENTRIES_LIST[4]]),
string_replace(str(result_list[15][i][CALL_LOG_ENTRIES_LIST[5]]))])
except:
_adel_log.log("analyzeDBs: ----> it seems that there are no call logs or that the database has an unsupported encoding!", 1)
_xmlParser.call_log_to_xml(xml_dir, call_log_list)
else:
_adel_log.log("analyzeDBs: ----> database file " + db_file_call_logs.split("/")[2] + " missing!", 1)
def analyze_sms_mss(backup_dir, os_version, xml_dir, config):
######################################################################################################################################################
# definition of Tuples #
######################################################################################################################################################
# SMS MMS Messages
## Database Name
db_file_sms = backup_dir + "/" + config.db_file_sms
######################################################################################################################################################
## Table Name
#### sms
## corresponding table number in the database
SMS_TABLE_NUMBER = config.sms_table_num
## Table Design
#### [[['_id', 'INTEGER'], ['thread_id', 'INTEGER'], ['address', 'TEXT'], ['person', 'INTEGER'], ['date', 'INTEGER'], ['date_sent', 'INTEGER'],
# ['protocol', 'INTEGER'], ['read', 'INTEGER'], ['status', 'INTEGER'], ['type', 'INTEGER'], ['reply_path_present', 'INTEGER'], ['subject', 'TEXT'],
# ['body', 'TEXT'], ['service_center', 'TEXT'], ['locked', 'INTEGER'], ['error_code', 'INTEGER'], ['seen', 'INTEGER']]
## Tuple Definition -> the integers in this tuple represent the corresponding columns of the database table of the above table design
SMS_ENTRIES_LIST = config.sms_entry_positions
######################################################################################################################################################
if os.path.isfile(db_file_sms):
_adel_log.log("parseDBs: ----> starting to parse \033[0;32mSMS messages\033[m", 0)
sms_list = []
try:
result_list = _sqliteParser.parse_db(db_file_sms)
for i in range(1, len(result_list[SMS_TABLE_NUMBER])):
# sms_list = [[id, thread_id, number, person, date, read, type, subject, body],[......],......]
sms_list.append([
str(result_list[SMS_TABLE_NUMBER][i][SMS_ENTRIES_LIST[0]]),
str(result_list[SMS_TABLE_NUMBER][i][SMS_ENTRIES_LIST[1]]),
str(result_list[SMS_TABLE_NUMBER][i][SMS_ENTRIES_LIST[2]]),
str(result_list[SMS_TABLE_NUMBER][i][SMS_ENTRIES_LIST[3]]),
time.strftime("%a, %d %B %Y %H:%M:%S", time.gmtime(result_list[SMS_TABLE_NUMBER][i][SMS_ENTRIES_LIST[4]] / 1000.0)),
str(result_list[SMS_TABLE_NUMBER][i][SMS_ENTRIES_LIST[5]]),
str(result_list[SMS_TABLE_NUMBER][i][SMS_ENTRIES_LIST[6]]),
str(result_list[SMS_TABLE_NUMBER][i][SMS_ENTRIES_LIST[7]]),
string_replace(str(result_list[SMS_TABLE_NUMBER][i][SMS_ENTRIES_LIST[8]]))
])
except:
_adel_log.log("analyzeDBs: ----> it seems that there are no SMS or MMS messages or that the database has an unsupported encoding!", 1)
_xmlParser.sms_messages_to_xml(xml_dir, sms_list)
else:
_adel_log.log("analyzeDBs: ----> database file " + db_file_sms.split("/")[2] + " missing!", 1)
def analyze_calendar(backup_dir, os_version, xml_dir, config):
######################################################################################################################################################
# definition of Tuples #
######################################################################################################################################################
# CALENDAR ENTRIES
## Database Name
db_file_calendar = backup_dir + "/" + config.db_file_calendar
######################################################################################################################################################
## Table Name
#### calendars
## corresponding table number in the database
CALENDAR_NAME_TABLE_NUMBER = config.calendar_name_table_num
## Table Design
#### ['_id', 'INTEGER'], ['account_name', 'TEXT'], ['account_type', 'TEXT'], ['_sync_id', 'TEXT'], ['dirty', 'INTEGER'], ['mutators', 'TEXT'],
# ['name', 'TEXT'], ['calendar_displayName', 'TEXT'], ['calendar_color', 'INTEGER'], ['calendar_color_index', 'TEXT'], ['calendar_access_level', 'INTEGER'],
# ['visible', 'INTEGER'], ['sync_events', 'INTEGER'], ['calendar_location', 'TEXT'], ['calendar_timezone', 'TEXT'], ['ownerAccount', 'TEXT'],
# ['isPrimary', 'INTEGER'], ['canOrganizerRespond', 'INTEGER'], ['canModifyTimeZone', 'INTEGER'], ['canPartiallyUpdate', 'INTEGER'],
# ['maxReminders', 'INTEGER'], ['allowedReminders', 'TEXT'], "1'", ['allowedAvailability', 'TEXT'], "1'", ['allowedAttendeeTypes', 'TEXT'], '1', "2'",
# ['deleted', 'INTEGER'], ['cal_sync1', 'TEXT'], ['cal_sync2', 'TEXT'], ['cal_sync3', 'TEXT'], ['cal_sync4', 'TEXT'], ['cal_sync5', 'TEXT'],
# ['cal_sync6', 'TEXT'], ['cal_sync7', 'TEXT'], ['cal_sync8', 'TEXT'], ['cal_sync9', 'TEXT'], ['cal_sync10', 'TEXT']
## Tuple Definition -> the integers in this tuple represent the corresponding columns of the database table of the above table design
CALENDARS_NAME_LIST = config.calendar_name_list
######################################################################################################################################################
## Table Name
#### events
## corresponding table number in the database
CALENDAR_EVENTS_TABLE_NUMBER = config.calendar_events_table_num
## Table Design
#### ['_id', 'INTEGER'], ['_sync_id', 'TEXT'], ['dirty', 'INTEGER'], ['mutators', 'TEXT'], ['lastSynced', 'INTEGER'], ['calendar_id', 'INTEGER'],
# ['title', 'TEXT'], ['eventLocation', 'TEXT'], ['description', 'TEXT'], ['eventColor', 'INTEGER'], ['eventColor_index', 'TEXT'],
# ['eventStatus', 'INTEGER'], ['selfAttendeeStatus', 'INTEGER'], ['dtstart', 'INTEGER'], ['dtend', 'INTEGER'], ['eventTimezone', 'TEXT'],
# ['duration', 'TEXT'], ['allDay', 'INTEGER'], ['accessLevel', 'INTEGER'], ['availability', 'INTEGER'], ['hasAlarm', 'INTEGER'],
# ['hasExtendedProperties', 'INTEGER'], ['rrule', 'TEXT'], ['rdate', 'TEXT'], ['exrule', 'TEXT'], ['exdate', 'TEXT'], ['original_id', 'INTEGER'],
# ['original_sync_id', 'TEXT'], ['originalInstanceTime', 'INTEGER'], ['originalAllDay', 'INTEGER'], ['lastDate', 'INTEGER'], ['hasAttendeeData', 'INTEGER'],
# ['guestsCanModify', 'INTEGER'], ['guestsCanInviteOthers', 'INTEGER'], ['guestsCanSeeGuests', 'INTEGER'], ['organizer', 'STRING'],
# ['isOrganizer', 'INTEGER'], ['deleted', 'INTEGER'], ['eventEndTimezone', 'TEXT'], ['customAppPackage', 'TEXT'], ['customAppUri', 'TEXT'], ['uid2445', 'TEXT'],
# ['sync_data1', 'TEXT'], ['sync_data2', 'TEXT'], ['sync_data3', 'TEXT'], ['sync_data4', 'TEXT'], ['sync_data5', 'TEXT'], ['sync_data6', 'TEXT'],
# ['sync_data7', 'TEXT'], ['sync_data8', 'TEXT'], ['sync_data9', 'TEXT'], ['sync_data10', 'TEXT']
## Tuple Definition -> the integers in this tuple represent the corresponding columns of the database table of the above table design
CALENDAR_EVENTS_LIST = config.calendar_events_list
######################################################################################################################################################
if os.path.isfile(db_file_calendar):
_adel_log.log("parseDBs: ----> starting to parse \033[0;32mcalendar entries\033[m", 0)
calendar_list = []
try:
result_list = _sqliteParser.parse_db(db_file_calendar)
for i in range(1, len(result_list[CALENDAR_EVENTS_TABLE_NUMBER])):
if str(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[6]]) != "None":
end = time.strftime("%a, %d %B %Y %H:%M:%S", time.gmtime(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[6]] / 1000.0))
else:
end = time.strftime("%a, %d %B %Y %H:%M:%S", time.gmtime(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[5]] / 1000.0))
# calendar_list = [[id, calendarName, title, eventLocation, description, allDay, start, end, hasAlarm],[......],......]
calendar_list.append([
str(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[0]]),
str(result_list[CALENDAR_NAME_TABLE_NUMBER][result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[1]]][CALENDARS_NAME_LIST[1]]),
string_replace(str(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[2]])),
string_replace(str(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[3]])),
string_replace(str(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[4]])),
str(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[7]]),
time.strftime("%a, %d %B %Y %H:%M:%S", time.gmtime(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[5]] / 1000.0)),
end,
str(result_list[CALENDAR_EVENTS_TABLE_NUMBER][i][CALENDAR_EVENTS_LIST[8]])
])
except:
_adel_log.log("analyzeDBs: ----> it seems that there are no entries in the calendar or that the database has an unsupported encoding!", 1)
_xmlParser.calendar_to_xml(xml_dir, calendar_list)
else:
_adel_log.log("analyzeDBs: ----> database file " + db_file_calendar.split("/")[2] + " missing!", 1)
def analyze_contacts(backup_dir, os_version, xml_dir, config):
######################################################################################################################################################
# definition of Tuples #
######################################################################################################################################################
# CONTACTS ENTRIES
## Database Name
db_file_contacts = backup_dir + "/" + config.db_file_contacts
######################################################################################################################################################
## Table Name
#### contacts
## corresponding table number in the database
CONTACTS_TABLE_NUMBER = config.contacts_normal_table_num
## Table Design
#### [['_id', 'INTEGER'], ['name_raw_contact_id', 'INTEGER'], ['photo_id', 'INTEGER'], ['photo_file_id', 'INTEGER'], ['custom_ringtone', 'TEXT'],
# ['send_to_voicemail', 'INTEGER'], ['times_contacted', 'INTEGER'], ['last_time_contacted', 'INTEGER'], ['starred', 'INTEGER'], ['pinned', 'INTEGER'],
# ['has_phone_number', 'INTEGER'], ['lookup', 'TEXT'], ['status_update_id', 'INTEGER'], ['contact_last_updated_timestamp', 'INTEGER']]
## Tuple Definition -> the integers in this tuple represent the corresponding columns of the database table of the above table design
CONTACTS_LIST = config.contacts_normal_list
######################################################################################################################################################
## Table Name
#### raw_contacts
## corresponding table number in the database
RAW_CONTACTS_TABLE_NUMBER = config.raw_contacts_table_num
## Table Design
#### [['_id', 'INTEGER'], ['account_id', 'INTEGER'], ['sourceid', 'TEXT'], ['raw_contact_is_read_only', 'INTEGER'], ['version', 'INTEGER'],
# ['dirty', 'INTEGER'], ['deleted', 'INTEGER'], ['contact_id', 'INTEGER'], ['aggregation_mode', 'INTEGER'], ['aggregation_needed', 'INTEGER'],
# ['custom_ringtone', 'TEXT'], ['send_to_voicemail', 'INTEGER'], ['times_contacted', 'INTEGER'], ['last_time_contacted', 'INTEGER'],
# ['starred', 'INTEGER'], ['pinned', 'INTEGER'], ['display_name', 'TEXT'], ['display_name_alt', 'TEXT'], ['display_name_source', 'INTEGER'],
# ['phonetic_name', 'TEXT'], ['phonetic_name_style', 'TEXT'], ['sort_key', 'TEXT'], ['phonebook_label', 'TEXT'], ['phonebook_bucket', 'INTEGER'],
# ['sort_key_alt', 'TEXT'], ['phonebook_label_alt', 'TEXT'], ['phonebook_bucket_alt', 'INTEGER'], ['name_verified', 'INTEGER'], ['sync1', 'TEXT'],
# ['sync2', 'TEXT'], ['sync3', 'TEXT'], ['sync4', 'TEXT']]
## Tuple Definition -> the integers in this tuple represent the corresponding columns of the database table of the above table design
RAW_CONTACTS_LIST = config.raw_contacts_list
RAW_CONTACTS_LIST_DELETED = config.raw_contacts_deleted_list
######################################################################################################################################################
## Table Name
#### data
## corresponding table number in the database
DATA_TABLE_NUMBER = config.data_table_num
## Table Design
#### [['_id', 'INTEGER'], ['package_id', 'INTEGER'], ['mimetype_id', 'INTEGER'], ['raw_contact_id', 'INTEGER'], ['is_read_only', 'INTEGER'],
# ['is_primary', 'INTEGER'], ['is_super_primary', 'INTEGER'], ['data_version', 'INTEGER'], ['data1', 'TEXT'], ['data2', 'TEXT'], ['data3', 'TEXT'],
# ['data4', 'TEXT'], ['data5', 'TEXT'], ['data6', 'TEXT'], ['data7', 'TEXT'], ['data8', 'TEXT'], ['data9', 'TEXT'], ['data10', 'TEXT'],
# ['data11', 'TEXT'], ['data12', 'TEXT'], ['data13', 'TEXT'], ['data14', 'TEXT'], ['data15', 'TEXT'], ['data_sync1', 'TEXT'], ['data_sync2', 'TEXT'],
# ['data_sync3', 'TEXT'], ['data_sync4', 'TEXT']]
## Tuple Definition -> the integers in this tuple represent the corresponding columns of the database table of the above table design
DATA_ENTRY_LIST = config.data_entry_list
######################################################################################################################################################
## Table Name
#### mimetypes
## corresponding table number in the database
MIME_TABLE_NUMBER = config.mime_table_num
## Table Design and matching mimetypes for the contact entries
#### [['_id', 'INTEGER'], ['mimetype', 'TEXT']]
# [1, 'vnd.android.cursor.item/email_v2'], [2, 'vnd.android.cursor.item/im'], [3, 'vnd.android.cursor.item/nickname'],
# [4, 'vnd.android.cursor.item/organization'], [5, 'vnd.android.cursor.item/phone_v2'], [6, 'vnd.android.cursor.item/sip_address'],
# [7, 'vnd.android.cursor.item/name'], [8, 'vnd.android.cursor.item/postal-address_v2'], [9, 'vnd.android.cursor.item/identity'],
# [10, 'vnd.android.cursor.item/photo'], [11, 'vnd.android.cursor.item/group_membership']]
MIME_TYPE_LIST = config.mime_type_entry_list
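    # Note on usage (derived from the loops below; the concrete ids come from
    # the XML config and are not hard-coded here): MIME_TYPE_LIST[0] selects
    # email rows, [1] postal address rows, [2] phone rows, [3] structured name
    # rows, [4] website/URL rows and [5] organization rows; each data row's
    # mimetype id is compared against these configured values.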
######################################################################################################################################################
if os.path.isfile(db_file_contacts):
_adel_log.log("parseDBs: ----> starting to parse \033[0;32maddress book entries\033[m", 0)
contacts_list = []
try:
result_list = _sqliteParser.parse_db(db_file_contacts)
for i in range(1, len(result_list[CONTACTS_TABLE_NUMBER])):
email = "None"
url = "None"
lastname = "None"
firstname = "None"
number = "None"
address = "None"
company = "None"
# convert the date and time of the last call to this contact
if result_list[CONTACTS_TABLE_NUMBER][i][CONTACTS_LIST[3]] == 0:
last_time_contacted = "NEVER"
else:
last_time_contacted = time.strftime("%a, %d %B %Y %H:%M:%S", time.gmtime(result_list[CONTACTS_TABLE_NUMBER][i][CONTACTS_LIST[3]] / 1000.0))
# search the display name and other data for this contact
for j in range(1, len(result_list[RAW_CONTACTS_TABLE_NUMBER])):
# display name
if result_list[RAW_CONTACTS_TABLE_NUMBER][j][RAW_CONTACTS_LIST[0]] == result_list[CONTACTS_TABLE_NUMBER][i][CONTACTS_LIST[0]]:
display_name = string_replace(str(result_list[RAW_CONTACTS_TABLE_NUMBER][j][RAW_CONTACTS_LIST[1]]))
# other data like name, phone number, etc. => identified by mimetype
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[3]:
lastname = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[4]]))
firstname = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[3]]))
break;
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[0]:
if email == "None":
email = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
email = email + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[4]:
if url == "None":
url = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
url = url + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[2]:
if number == "None":
number = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
number = number + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[1]:
if "\n" in string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[5]])):
_adel_log.log("analyzeDBs: ----> check formatting of postal address for contact with id " + str(result_list[CONTACTS_TABLE_NUMBER][i][CONTACTS_LIST[0]]) + "!", 2)
address = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[5]]))
else:
if address == "None":
address = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[5]])) + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[8]])) + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[10]])) + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[11]]))
else:
address = address + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[5]:
if company == "None":
company = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
company = company + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
break;
else:
continue;
# contacts_list = [[id, photo_id, times_contacted, last_time_contacted, starred, number, display_name, lastname, firstname, company, email, url, address],[......],.....]
contacts_list.append([
str(result_list[CONTACTS_TABLE_NUMBER][i][CONTACTS_LIST[0]]),
str(result_list[CONTACTS_TABLE_NUMBER][i][CONTACTS_LIST[1]]),
str(result_list[CONTACTS_TABLE_NUMBER][i][CONTACTS_LIST[2]]),
last_time_contacted,
str(result_list[CONTACTS_TABLE_NUMBER][i][CONTACTS_LIST[4]]),
number,
display_name,
lastname,
firstname,
company,
email,
url,
address
])
# search for deleted entries and gather information
for j in range(1, len(result_list[RAW_CONTACTS_TABLE_NUMBER])):
del_email = "None"
del_url = "None"
del_lastname = "None"
del_firstname = "None"
del_number = "None"
del_address = "None"
del_company = "None"
if result_list[RAW_CONTACTS_TABLE_NUMBER][j][RAW_CONTACTS_LIST_DELETED[1]] == 1:
_adel_log.log("analyzeDBs: ----> found deleted contact entry!", 3)
# convert the date and time of the last call to this contact
if result_list[RAW_CONTACTS_TABLE_NUMBER][j][RAW_CONTACTS_LIST_DELETED[3]] == None:
del_last_time_contacted = "NEVER"
else:
del_last_time_contacted = time.strftime("%a, %d %B %Y %H:%M:%S", time.gmtime(result_list[RAW_CONTACTS_TABLE_NUMBER][j][RAW_CONTACTS_LIST_DELETED[3]] / 1000.0))
del_times_contacted = str(result_list[RAW_CONTACTS_TABLE_NUMBER][j][RAW_CONTACTS_LIST_DELETED[2]])
del_starred = str(result_list[RAW_CONTACTS_TABLE_NUMBER][j][RAW_CONTACTS_LIST_DELETED[4]])
del_display_name = string_replace(str(result_list[RAW_CONTACTS_TABLE_NUMBER][j][RAW_CONTACTS_LIST_DELETED[5]]))
# other data like name, phone number, etc. => identified by mimetype
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[3]:
del_lastname = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[4]]))
del_firstname = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[3]]))
break;
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[0]:
if del_email == "None":
del_email = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
del_email = del_email + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[4]:
if del_url == "None":
del_url = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
del_url = del_url + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[2]:
if del_number == "None":
del_number = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
del_number = del_number + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[1]:
if "\n" in string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[5]])):
_adel_log.log("analyzeDBs: ----> check formatting of postal address for contact with id " + str(result_list[CONTACTS_TABLE_NUMBER][i][CONTACTS_LIST[0]]) + "!", 2)
del_address = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[5]]))
else:
if del_address == "None":
del_address = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[5]])) + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[8]])) + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[10]])) + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[11]]))
else:
del_address = del_address + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
for k in range(1, len(result_list[DATA_TABLE_NUMBER])):
if result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[1]] == j and result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[0]] == MIME_TYPE_LIST[5]:
if del_company == "None":
del_company = string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
del_company = del_company + ";" + string_replace(str(result_list[DATA_TABLE_NUMBER][k][DATA_ENTRY_LIST[2]]))
else:
continue;
else:
continue;
# contacts_list = [[id, photo_id, times_contacted, last_time_contacted, starred, number, display_name, lastname, firstname, company, email, url, address],[......],.....]
contacts_list.append([
"DELETED",
"DELETED",
del_times_contacted,
del_last_time_contacted,
del_starred,
del_number,
del_display_name,
del_lastname,
del_firstname,
del_company,
del_email,
del_url,
del_address
])
except:
_adel_log.log("analyzeDBs: ----> it seems that the contact list is empty or that the database has an unsupported encoding!", 1)
_xmlParser.contacts_to_xml(xml_dir, contacts_list)
else:
_adel_log.log("analyzeDBs: ----> database file " + db_file_contacts.split("/")[2] + " missing!", 1)
def phone_info(backup_dir, os_version, xml_dir, handheld_id, config):
######################################################################################################################################################
# definition of Tuples #
######################################################################################################################################################
# SMARTPHONE INFORMATION
## Database Name
db_file_info_1 = backup_dir + "/" + config.file_info1_db_name
ACCOUNTS_TABLE_NUMBER = config.file_info1_table_num
######################################################################################################################################################
## Table Design
#### [['row_id', 'INTEGER'], ['account_name', 'TEXT'], ['account_type', 'TEXT']]
## Tuple Definition -> the integers in this tuple represent the corresponding columns of the database table of the above table design
ACCOUNTS_ENTRIES = config.account_entry_list
######################################################################################################################################################
## Database Name
db_file_info_2 = backup_dir + "/" + config.file_info2_db_name
######################################################################################################################################################
## Table Name
#### meta
## corresponding table number in the database
META_TABLE_NUMBER = config.file_info2_table_num
## Table Design
#### [['key', 'TEXT'], ['value', 'TEXT']]
## Tuple Definition -> the integers in this tuple represent the corresponding columns of the database table of the above table design
META_ENTRY = config.meta_entry_list[0] # only one element
######################################################################################################################################################
## Database Name
db_file_info_3 = backup_dir + "/" + config.file_info3_db_name
######################################################################################################################################################
## Table Name
#### secure
## corresponding table number in the database
ID_TABLE_NUMBER = config.file_info3_table_num
## Table Design
#### [['_id', 'INTEGER'],['name', 'TEXT'],['value', 'TEXT']]
## Tuple Definition -> the integers in this tuple represent the corresponding columns of the database table of the above table design
ID_ENTRIES = config.settings_entry_list
######################################################################################################################################################
_adel_log.log("parseDBs: ----> starting to parse \033[0;32msmartphone info\033[m", 0)
if os.path.isfile(db_file_info_1):
try:
result_list_1 = _sqliteParser.parse_db(db_file_info_1)
account_name = str(result_list_1[ACCOUNTS_TABLE_NUMBER][1][ACCOUNTS_ENTRIES[0]])
account_type = str(result_list_1[ACCOUNTS_TABLE_NUMBER][1][ACCOUNTS_ENTRIES[1]])
except:
_adel_log.log("analyzeDBs: ----> can't get required data from " + db_file_info_1.split("/")[2] + "! Please check manually for account data.", 1)
account_name = "not available"
account_type = "not available"
else:
account_name = "not available"
account_type = "not available"
if os.path.isfile(db_file_info_2):
try:
result_list_2 = _sqliteParser.parse_db(db_file_info_2)
imsi = str(result_list_2[META_TABLE_NUMBER][1][META_ENTRY])
except:
_adel_log.log("analyzeDBs: ----> can't get required data from " + db_file_info_2.split("/")[2] + "! Please check manually for IMSI.", 1)
imsi = "not available"
else:
imsi = "not available"
if os.path.isfile(db_file_info_3):
try:
result_list_3 = _sqliteParser.parse_db(db_file_info_3)
android_id = str(result_list_3[ID_TABLE_NUMBER][ID_ENTRIES[1]][ID_ENTRIES[0]])
except:
_adel_log.log("analyzeDBs: ----> can't get required data from " + db_file_info_3.split("/")[2] + "! Please check manually for android ID.", 1)
android_id = "not available"
else:
android_id = "not available"
model = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.model'], stdout=subprocess.PIPE).communicate(0)[0]
phone_info_list = [account_name, account_type, imsi, android_id, handheld_id, model, os_version]
_xmlParser.smartphone_info_to_xml(xml_dir, phone_info_list)
def analyze_twitter(backup_dir, os_version, xml_dir, twitter_dbname_list, config):
    twitter_dbname_list.sort()
######################################################################################################################################################
##Table name
#### {user_id}.db
TWITTER_USERS_TABLE = config.twitter_user_table_num
## Table Design
#### [0 ['_id', 'Int'],
#### 1 ['user_id', 'Int'],
#### 2 ['username', 'TEXT'],
#### 3 ['name', 'TEXT'],
#### 4 ['description', 'TEXT'],
#### 5 ['web_url', 'TEXT'],
#### 7 ['location', 'TEXT'],
#### 10 ['followers', 'Int'],
#### 11 ['friends', 'Int'],
#### 13 ['geo_enable', 'Int'],
#### 14 ['profile_created', 'Int'],
#### 15 ['image_url', 'TEXT'],
#### 17 ['updated', 'Int']]
TWITTER_USERS_USER_ID = config.twitter_user_list[0]
TWITTER_USERS_USERNAME = config.twitter_user_list[1]
TWITTER_USERS_NAME = config.twitter_user_list[2]
TWITTER_USERS_DESCRIPTION = config.twitter_user_list[3]
TWITTER_USERS_LOCATION = config.twitter_user_list[4]
TWITTER_USERS_FOLLOWERS = config.twitter_user_list[5]
TWITTER_USERS_FRIENDS = config.twitter_user_list[6]
TWITTER_USERS_GEO_ENABLE = config.twitter_user_list[7]
TWITTER_USERS_PROFILE_CREATED = config.twitter_user_list[8]
TWITTER_USERS_UPDATED = config.twitter_user_list[9]
##
## Table Name
#### statuses
TWITTER_STATUSES_TABLE = config.statuses_table_num
## Table Design
#### [0 ['_id', 'Int'],
#### 1 ['status_id', 'Int'],
#### 2 ['author_id', 'Int'], seems to correspond to TWITTER_USERS_TABLE [1 ['user_id', 'Int']]
#### 3 ['content', 'TEXT'],
#### 4 ['source', 'TEXT']
#### 5 ['source_url', 'TEXT']
#### 6 ['created', 'Int']
TWITTER_STATUSES_AUTHOR_ID = config.twitter_statuses_list[0]
TWITTER_STATUSES_CONTENT = config.twitter_statuses_list[1]
TWITTER_STATUSES_SOURCE = config.twitter_statuses_list[2]
TWITTER_STATUSES_SOURCE_URL = config.twitter_statuses_list[3]
TWITTER_STATUSES_CREATED = config.twitter_statuses_list[4]
for k in range (0, len(twitter_dbname_list)):
twitter_db_name = backup_dir + "/" + twitter_dbname_list[k]
if twitter_db_name.split("/")[-1] == "0.db":
continue
elif twitter_db_name.split("/")[-1] == "twitter.db":
continue
elif twitter_db_name.split("/")[-1] == "global.db":
continue
else:
if os.path.isfile(twitter_db_name):
_adel_log.log("parseDBs: ----> starting to parse \033[0;32m" + twitter_db_name.split("/")[-1] + "\033[m", 0)
users_list = []
temp_list = _sqliteParser.parse_db(twitter_db_name)
try:
for i in range (1, len(temp_list[TWITTER_USERS_TABLE])):
## Entry generated is User_ID, User_Name, Real_Name, description, location (if given), profile_created, updated, followers, friends
if temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_GEO_ENABLE] == 1:
location = str(temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_LOCATION])
else:
location = "no location"
users_list.append([
filter(lambda x: x in string.printable, str(temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_USER_ID])),
filter(lambda x: x in string.printable, str(temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_USERNAME])),
filter(lambda x: x in string.printable, str(temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_NAME])),
filter(lambda x: x in string.printable, str(temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_DESCRIPTION])),
location,
time.strftime("%a, %d %B %Y %H:%M:%S", time.gmtime((temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_PROFILE_CREATED]) / 1000)),
time.strftime("%a, %d %B %Y %H:%M:%S", time.gmtime((temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_UPDATED]) / 1000)),
filter(lambda x: x in string.printable, str(temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_FOLLOWERS])),
filter(lambda x: x in string.printable, str(temp_list[TWITTER_USERS_TABLE][i][TWITTER_USERS_FRIENDS])),
])
#_adel_log.log(users_list[0][0],users_list[0][1],users_list[0][2],users_list[0][3],users_list[0][4],users_list[0][5],users_list[0][6],users_list[0][7],users_list[0][8])
except:
_adel_log.log("analyzeDBs: ----> can't get required user data from " + twitter_db_name.split("/")[-1] + " ! DB structure changed?", 1)
continue
messages_list = {}
try:
for j in range (1, len(temp_list[TWITTER_STATUSES_TABLE])):
k = temp_list[TWITTER_STATUSES_TABLE][j][TWITTER_STATUSES_AUTHOR_ID]
if temp_list[TWITTER_STATUSES_TABLE][j][TWITTER_STATUSES_SOURCE_URL] == None:
source_url = "n.a."
else:
source_url = temp_list[TWITTER_STATUSES_TABLE][j][TWITTER_STATUSES_SOURCE_URL]
m = (string_replace(str(temp_list[TWITTER_STATUSES_TABLE][j][TWITTER_STATUSES_CONTENT])), string_replace(str(temp_list[TWITTER_STATUSES_TABLE][j][TWITTER_STATUSES_SOURCE])), source_url, time.strftime("%a, %d %B %Y %H:%M:%S", time.gmtime(temp_list[TWITTER_STATUSES_TABLE][j][TWITTER_STATUSES_CREATED] / 1000)))
if messages_list.has_key(k):
messages_list[k].append(m)
else:
entry = []
entry.append(m)
messages_list[k] = entry
except:
_adel_log.log("analyzeDBs: ----> can't get required status data from " + twitter_db_name.split("/")[-1] + " ! DB structure changed?", 1)
continue
_xmlParser.twitter_to_xml(xml_dir, users_list, messages_list)
else:
_adel_log.log("analyzeDBs: ----> database file " + twitter_db_name.split("/")[-1] + " missing!", 1)
def analyze_facebook(backup_dir, os_version, xml_dir, config):
FB_DB_FILENAME = backup_dir + config.fb_db_filename
######################################################################################################################################################
## Table Name
#### user_values
FB_USER_VALUES_TABLE = config.fb_users_values_table_num
## table design
## 1 name
## 2 value
    ## FB puts some interesting information into this table by filling the name/value slots. The following entries were of interest.
    ## It has to be tested whether other accounts store the information in a different order. Currently the order looks like this:
    ## 2 active_session_info -> various infos in a {dict} structure
## 4 last_contacts_sync -> timestamp
## 5 current_account -> Username of the FB user
## 5 last_seen_id_message \
## 6 last_seen_id_poke \
## 7 last_seen_id_friend_request\
## 8 last_seen_id_event_invite -> no field description in sql -> flags?
FB_USER_VALUES_ACTIVE_SESSION_INFO = config.fb_users_values_list[0]
FB_USER_VALUES_LAST_CONTACTS_SYNC = config.fb_users_values_list[1]
FB_USER_VALUES_CURRENT_ACCOUNT = config.fb_users_values_list[2]
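    # Hypothetical illustration (not taken from a real database) of the string
    # that is rebuilt into a dict further down: the active_session_info value
    # roughly looks like
    #   {uid:1234567,session_key:abc.def,profile:{name:Jane Doe,pic_url:http...}}
    # i.e. a flat key:value list whose 'profile' value is itself a nested {...}.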
## table name
#### connections
FB_CONNECTIONS = config.fb_connections_table_num
## table design
## 1 user_id -> uid again
## 2 display_name -> full name as given with the FB account
## 3 connection_type -> type of account: 0 = person, 2 = fan group
## 4 user_image_url
## 5 user_image -> actual image as {blob} inserted
FB_CONNECTIONS_USER_ID = config.fb_connection_list[0]
FB_CONNECTIONS_DISPLAY_NAME = config.fb_connection_list[1]
FB_CONNECTIONS_CONNECTION_TYPE = config.fb_connection_list[2]
FB_CONNECTIONS_USER_IMAGE_URL = config.fb_connection_list[3]
## table name
#### friends_data
FB_FRIENDS_DATA = config.fb_friends_data_table_num
## table design
## 1 user_id -> uid once more
## 2 first_name
## 3 last_name
## 6 email
## 7 birthday_month
## 8 birthday_day
## 9 birthday_year
FB_FRIENDS_DATA_USER_ID = config.fb_friends_data_list[0]
FB_FRIENDS_DATA_FIRST_NAME = config.fb_friends_data_list[1]
FB_FRIENDS_DATA_LAST_NAME = config.fb_friends_data_list[2]
FB_FRIENDS_DATA_EMAIL = config.fb_friends_data_list[3]
FB_FRIENDS_DATA_BDAY_MONTH = config.fb_friends_data_list[4]
FB_FRIENDS_DATA_BDAY_DAY = config.fb_friends_data_list[5]
FB_FRIENDS_DATA_BDAY_YEAR = config.fb_friends_data_list[6]
if os.path.isfile(FB_DB_FILENAME):
_adel_log.log("parseDBs: ----> starting to parse \033[0;32m" + FB_DB_FILENAME.split("/")[-1] + "\033[m", 0)
FB_TempList = _sqliteParser.parse_db(FB_DB_FILENAME)
try:
# print FB_TempList[FB_USER_VALUES_TABLE][2][2]
FB_active_session_temp = string.replace(FB_TempList[FB_USER_VALUES_TABLE][2][2], '\"', '\'') #replace the " with ' to use it for dict structure
FB_active_session = {}
# print "ping"
p = 0
o = 0
while o < len(FB_active_session_temp): # inside the value of the active session field is a dictionary structure which includes a second dict as value
o = o + 1 # of the 'profile' key of the first structure. I therefore parse the structure as a string and rebuild the dict.
if FB_active_session_temp[o] == ':':
key_start = p + 1
key_end = o
# print key_start,key_end
key = FB_active_session_temp[key_start:key_end]
# print key
for p in range (o, len(FB_active_session_temp)):
if FB_active_session_temp[p] == '{':
o = p
break
elif FB_active_session_temp[p] == '}':
value_start = o + 1
value_end = p
#print value_start,value_end
value = FB_active_session_temp[value_start:value_end]
#print value
FB_active_session [key] = value
o = len(FB_active_session_temp)
break
elif FB_active_session_temp[p] == ',':
value_start = o + 1
value_end = p
#print value_start,value_end
value = FB_active_session_temp[value_start:value_end]
#print value
FB_active_session [key] = value
o = p
#print o,p
break
except:
print "Error: ", sys.exc_info()[0]
_adel_log.log("analyzeDBs: ----> can't get the required active session data from " + FB_DB_FILENAME.split("/")[-1] + " ! DB structure changed?", 1)
try:
friends_list = []
for i in range(1, len(FB_TempList[FB_FRIENDS_DATA])):
friends_list.append([
str(FB_TempList[FB_FRIENDS_DATA][i][FB_FRIENDS_DATA_USER_ID]),
str(FB_TempList[FB_FRIENDS_DATA][i][FB_FRIENDS_DATA_FIRST_NAME]) + " " + str(FB_TempList[FB_FRIENDS_DATA][i][FB_FRIENDS_DATA_LAST_NAME]),
str(FB_TempList[FB_FRIENDS_DATA][i][FB_FRIENDS_DATA_BDAY_DAY]) + "." + str(FB_TempList[FB_FRIENDS_DATA][i][FB_FRIENDS_DATA_BDAY_MONTH]) + "." + str(FB_TempList[FB_FRIENDS_DATA][i][FB_FRIENDS_DATA_BDAY_YEAR]),
str(FB_TempList[FB_FRIENDS_DATA][i][FB_FRIENDS_DATA_EMAIL])
])
#for j in range(0,len(userlist)):
# print userlist[j]
except:
_adel_log.log("analyzeDBs: ----> can't get the required friends data from " + FB_DB_FILENAME.split("/")[-1] + " ! DB structure changed?", 1)
try:
conn_list = []
for k in range(1, len(FB_TempList[FB_CONNECTIONS])):
conn_list.append([
str(FB_TempList[FB_CONNECTIONS][k][FB_CONNECTIONS_USER_ID]),
str(FB_TempList[FB_CONNECTIONS][k][FB_CONNECTIONS_DISPLAY_NAME]),
str(FB_TempList[FB_CONNECTIONS][k][FB_CONNECTIONS_USER_IMAGE_URL])
])
#for l in range(0,len(conn_list)):
# print conn_list[l]
except:
_adel_log.log("analyzeDBs: ----> can't get the required connections data from " + FB_DB_FILENAME.split("/")[-1] + " ! DB structure changed?", 1)
_xmlParser.facebook_to_xml(xml_dir, FB_active_session, friends_list, conn_list)
def analyze(backup_dir, os_version, xml_dir, twitter_dbname_list, config):
_adel_log.log("############ DATABASE ANALYSIS ############ \n", 2)
analyze_calendar(backup_dir, os_version, xml_dir, config)
analyze_sms_mss(backup_dir, os_version, xml_dir, config)
analyze_call_logs(backup_dir, os_version, xml_dir, config)
analyze_contacts(backup_dir, os_version, xml_dir, config)
analyze_facebook(backup_dir, os_version, xml_dir, config)
analyze_twitter(backup_dir, os_version, xml_dir, twitter_dbname_list, config) | draekko/ADEL | _analyzeDB.py | Python | gpl-3.0 | 53,681 |
#!/usr/bin/python
import sys
from LAPS.MsgBus.Bus import Bus
# Create queue with a unique name
# insert message
# receive msg
# delete queue
if __name__ == "__main__":
# If invoked directly, parse command line arguments for logger information
# and pass the rest to the run() method defined above
# --------------------------------------------------------------------------
try:
unique_queue_name = sys.argv[1]
except:
print "Not enough command line arguments: this test needs a unique queue name"
exit(1)
#msgbus = Bus(broker="lhd002", address=unique_queue_name)
#parset = """
#key=value
#"""
#msgbus.send(parset,"Observation123456") | jjdmol/LOFAR | SubSystems/LAPS_CEP/test/startPythonFromMsg.py | Python | gpl-3.0 | 725 |
import functools
import mock
from django.test import TestCase
from django.views.generic import View
from django_universal_view_decorator.decorators.view_class_decorator import view_class_decorator, ViewClassDecorator
def test_log(*args, **kwargs):
pass
# Test decorators
class Decorator(object):
def __init__(self, decorator_id):
super(Decorator, self).__init__()
self.decorator_id = decorator_id
def __call__(self, wrapped):
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
test_log('decorator', self.decorator_id)
return wrapped(*args, **kwargs)
return wrapper
def __repr__(self):
return '{}({!r})'.format(type(self).__name__, self.decorator_id)
decorator = Decorator
# Test view classes
@view_class_decorator(decorator('only_decorated_in_class_hierarchy'))
class OnlyDecoratedViewInClassHierarchy(View):
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', OnlyDecoratedViewInClassHierarchy)
return 'response'
@view_class_decorator(decorator('base'))
class BaseView(View):
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', BaseView)
return 'response'
@view_class_decorator(decorator('derived'), decorator('derived-b'))
class DerivedView(BaseView):
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', DerivedView)
return super(DerivedView, self).dispatch(request, *args, **kwargs)
@view_class_decorator(decorator('derived2'))
class DerivedView2(DerivedView):
def dispatch(self, request, *args, **kwargs):
test_log('dispatch', DerivedView2)
return super(DerivedView2, self).dispatch(request, *args, **kwargs)
# Tests
@mock.patch(__name__ + '.test_log', wraps=test_log)
class TestDecoration(TestCase):
def test_decorated_view_without_other_decorated_views_in_hierarchy(self, mock_test_log):
response = OnlyDecoratedViewInClassHierarchy.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator', 'only_decorated_in_class_hierarchy'),
mock.call('dispatch', OnlyDecoratedViewInClassHierarchy),
])
def test_decorated_view_with_decorated_subclass(self, mock_test_log):
response = BaseView.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator', 'base'),
mock.call('dispatch', BaseView),
])
def test_decorated_view_with_decorated_base_and_decorated_subclass(self, mock_test_log):
response = DerivedView.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator', 'derived'),
mock.call('decorator', 'derived-b'),
mock.call('decorator', 'base'),
mock.call('dispatch', DerivedView),
mock.call('dispatch', BaseView),
])
def test_decorated_view_with_decorated_base_and_decorated_grand_base(self, mock_test_log):
response = DerivedView2.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator', 'derived2'),
mock.call('decorator', 'derived'),
mock.call('decorator', 'derived-b'),
mock.call('decorator', 'base'),
mock.call('dispatch', DerivedView2),
mock.call('dispatch', DerivedView),
mock.call('dispatch', BaseView),
])
@mock.patch(__name__ + '.test_log', wraps=test_log)
class TestStackedDecoration(TestCase):
def test_view_class_decorator_with_multiple_parameters(self, mock_test_log):
@view_class_decorator(decorator(1), decorator(2))
class ViewClass(View):
def dispatch(self, request, *args, **kwargs):
test_log('dispatch')
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator', 1),
mock.call('decorator', 2),
mock.call('dispatch'),
])
def test_stacked_view_class_decorators(self, mock_test_log):
@view_class_decorator(decorator(1))
@view_class_decorator(decorator(2), decorator(3))
@view_class_decorator(decorator(4))
class ViewClass(View):
def dispatch(self, request, *args, **kwargs):
test_log('dispatch')
return 'response'
response = ViewClass.as_view()('request')
self.assertEqual(response, 'response')
self.assertListEqual(mock_test_log.mock_calls, [
mock.call('decorator', 1),
mock.call('decorator', 2),
mock.call('decorator', 3),
mock.call('decorator', 4),
mock.call('dispatch'),
])
class TestInternals(TestCase):
def test_as_view_method_gets_decorated_only_on_the_first_decorated_ancestor_view_class(self):
with mock.patch.object(ViewClassDecorator, '_ViewClassDecorator__decorate_the_as_view_method',
wraps=getattr(ViewClassDecorator, '_ViewClassDecorator__decorate_the_as_view_method')) \
as mock_decorate_the_as_view_method:
class BaseBase(View):
pass
@view_class_decorator(decorator(0))
class Base(BaseBase):
pass
@view_class_decorator(decorator(1))
class Derived1(Base):
pass
@view_class_decorator(decorator(2))
class Derived2(Base):
pass
@view_class_decorator(decorator(21))
class Derived2_1(Derived2):
pass
mock_decorate_the_as_view_method.assert_called_once_with(Base)
class TestDecorationErrors(TestCase):
def test_trying_to_decorate_a_non_view_class_fails(self):
class NonViewClass(object):
pass
self.assertRaisesMessage(
TypeError,
"The decorated view class ({}) doesn't have an as_view() method.".format(NonViewClass),
view_class_decorator(decorator(0)),
NonViewClass
)
| pasztorpisti/django-universal-view-decorator | tests/test_view_class_decorator.py | Python | mit | 6,461 |
import py
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.rlib.jit import JitDriver
class ListTests(object):
def test_basic_list(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'lst'])
def f(n):
lst = []
while n > 0:
myjitdriver.can_enter_jit(n=n, lst=lst)
myjitdriver.jit_merge_point(n=n, lst=lst)
lst.append(n)
n -= len(lst)
return len(lst)
res = self.meta_interp(f, [42], listops=True)
assert res == 9
def test_list_operations(self):
class FooBar:
def __init__(self, z):
self.z = z
def f(n):
lst = [41, 42]
lst[0] = len(lst) # [2, 42]
lst.append(lst[1]) # [2, 42, 42]
m = lst.pop() # 42
m += lst.pop(0) # 44
lst2 = [FooBar(3)]
lst2.append(FooBar(5))
m += lst2.pop().z # 49
return m
res = self.interp_operations(f, [11], listops=True)
assert res == 49
self.check_operations_history(call_i=1, call_n=2)
def test_list_of_voids(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'lst'])
def f(n):
lst = [None]
while n > 0:
myjitdriver.can_enter_jit(n=n, lst=lst)
myjitdriver.jit_merge_point(n=n, lst=lst)
lst = [None, None]
n -= 1
return len(lst)
res = self.meta_interp(f, [21], listops=True)
assert res == 2
def test_make_list(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'lst'])
def f(n):
lst = None
while n > 0:
lst = [0] * 10
myjitdriver.can_enter_jit(n=n, lst=lst)
myjitdriver.jit_merge_point(n=n, lst=lst)
n -= 1
return lst[n]
res = self.meta_interp(f, [21], listops=True, enable_opts='')
assert res == 0
def test_getitem(self):
myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'lst'])
def f(n):
lst = []
for i in range(n):
lst.append(i)
i = 0
while n > 0:
myjitdriver.can_enter_jit(n=n, lst=lst, i=i)
myjitdriver.jit_merge_point(n=n, lst=lst, i=i)
n -= lst[i]
i += 1
return lst[i]
res = self.meta_interp(f, [21], listops=True)
assert res == f(21)
self.check_resops(call=0)
def test_getitem_neg(self):
myjitdriver = JitDriver(greens = [], reds = ['i', 'n'])
def f(n):
x = i = 0
while i < 10:
myjitdriver.can_enter_jit(n=n, i=i)
myjitdriver.jit_merge_point(n=n, i=i)
lst = [41]
lst.append(42)
x = lst[n]
i += 1
return x
res = self.meta_interp(f, [-2], listops=True)
assert res == 41
self.check_resops(call=0, guard_value=0)
class TestLLtype(ListTests, LLJitMixin):
pass
| oblique-labs/pyVM | rpython/jit/metainterp/test/test_slist.py | Python | mit | 3,217 |
import string
import random
import logging
import os
from rootpy import asrootpy, log
from rootpy.plotting import Legend, Canvas, Pad, Graph
from rootpy.plotting.base import Color, MarkerStyle
from rootpy.plotting.utils import get_limits
import ROOT
# from external import husl
# suppress some nonsense logging messages when writing to pdfs.
# Also, setup default logger
log["/ROOT.TCanvas.Print"].setLevel(log.WARNING)
logging.basicConfig(level=logging.DEBUG)
log = log["/roofie"]
def is_plottable(obj):
"""
Check if the given object is considered a plottable.
Currently, TH1 and TGraph are considered plottable.
"""
return isinstance(obj, (ROOT.TH1, ROOT.TGraph))
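# Illustrative behaviour (assuming a working ROOT installation):
#   is_plottable(ROOT.TH1F("h", "h", 10, 0, 1))  -> True
#   is_plottable(ROOT.TGraph())                  -> True
#   is_plottable("not a ROOT object")            -> False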
class Styles(object):
# Define names of plot layouts:
class _Default_Style(object):
pt_per_cm = 28.4527625
titlefont = 43
labelfont = 43
markerSizepx = 4 # number of pixels of the marker
class Presentation_full(_Default_Style):
axisTitleSize = 14
axisLabelSize = 14
legendSize = 14
canvasWidth = 340
canvasHeight = 300
plot_margins = (.13, .05, .13, .1) # left, right, bottom, top
        plot_ytitle_offset = 1.15 # factor of the normal offset :P, may lie outside of the canvas
class Presentation_half(_Default_Style):
axisTitleSize = 10
axisLabelSize = 10
legendSize = 10
canvasWidth = 170
canvasHeight = 150
plot_margins = (.3, .08, .2, .1)
plot_ytitle_offset = 1
class Public_full(_Default_Style):
axisTitleSize = 10
axisLabelSize = 8
legendSize = 8
canvasWidth = 340
canvasHeight = 300
plot_margins = (.13, .05, .13, .04)
plot_ytitle_offset = 1.15
def gen_random_name():
"""Generate a random name for temp hists"""
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(25))
def get_color_generator(palette='root', ncolors=10):
"""
Returns a generator for n colors.
Parameters
----------
palette : string
name of the color palette which should be used
ncolors : int
number of colors this palette should have, it might be ignored by some palettes!
Returns
-------
generator :
colors which can be digested by _rootpy_
"""
    colors = None
    # generated with sns.palplot(sns.color_palette("colorblind", 10))
    if palette == 'colorblind':
colors = ([(0.0, 0.4470588235294118, 0.6980392156862745),
(0.0, 0.6196078431372549, 0.45098039215686275),
(0.8352941176470589, 0.3686274509803922, 0.0),
(0.8, 0.4745098039215686, 0.6549019607843137),
(0.9411764705882353, 0.8941176470588236, 0.25882352941176473),
(0.33725490196078434, 0.7058823529411765, 0.9137254901960784)])
if palette == 'set2':
colors = ([(0.40000000596046448, 0.7607843279838562, 0.64705884456634521),
(0.98131487965583808, 0.55538641635109398, 0.38740485135246722),
(0.55432528607985565, 0.62711267120697922, 0.79595541393055635),
(0.90311419262605563, 0.54185316071790801, 0.76495195557089413),
(0.65371782148585622, 0.84708959004458262, 0.32827375098770734),
(0.9986312957370983, 0.85096502233954041, 0.18488274134841617),
(0.89573241682613591, 0.76784315109252932, 0.58182240093455595),
(0.70196080207824707, 0.70196080207824707, 0.70196080207824707)])
if palette == 'husl':
colors = [(0.9677975592919913, 0.44127456009157356, 0.5358103155058701),
(0.8616090647292522, 0.536495730113334, 0.19548899031476086),
(0.6804189127793346, 0.6151497514677574, 0.19405452111445337),
(0.46810256823426105, 0.6699492535792404, 0.1928958739904499),
(0.20125317221201128, 0.6907920815379025, 0.47966761189275336),
(0.21044753832183283, 0.6773105080456748, 0.6433941168468681),
(0.2197995660828324, 0.6625157876850336, 0.7732093159317209),
(0.433280341176423, 0.6065273407962815, 0.9585467098271748),
(0.8004936186423958, 0.47703363533737203, 0.9579547196007522),
(0.962272393509669, 0.3976451968965351, 0.8008274363432775)]
if palette == 'root':
# named colors of the ROOT TColor colorwheel are between 800 and 900, +1 to make them look better
colors = []
for i in range(0, ncolors):
colors.append((800 + int(100.0 / ncolors) * i) + 1)
if colors:
for color in colors:
yield color
else:
raise ValueError("Unknonw palette")
class Figure(object):
def __init__(self):
# User settable parameters:
self.title = ''
self.xtitle = ''
self.ytitle = ''
self.plot = self.Plot()
self.legend = self.Legend()
# Private:
self._plottables = []
self.style = Styles.Presentation_full
class Plot(object):
logx = False
logy = False
gridx = False
gridy = False
palette = 'root'
palette_ncolors = 10
xmin, xmax, ymin, ymax = None, None, None, None
frame = None
class Legend(object):
title = None
position = 'tl'
def _create_legend(self):
nentries = len([pdic['legend_title'] for pdic in self._plottables if pdic['legend_title'] != ''])
leg = Legend(nentries, leftmargin=0, rightmargin=0, entrysep=0.01,
textsize=self.style.legendSize, textfont=43, margin=0.1, )
if self.legend.title:
leg.SetHeader(self.legend.title)
leg.SetBorderSize(0) # no box
leg.SetFillStyle(0) # transparent background of legend TPave(!)
return leg
def _theme_plottable(self, obj):
try:
axes = obj.GetXaxis(), obj.GetYaxis()
for axis in axes:
axis.SetLabelSize(self.style.axisLabelSize)
axis.SetLabelFont(self.style.labelfont)
axis.SetTitleFont(self.style.titlefont)
axis.SetTitleSize(self.style.axisTitleSize)
# yaxis only settings:
axes[1].SetTitleOffset(self.style.plot_ytitle_offset)
except AttributeError:
# obj might not be of the right type
pass
# apply styles, this might need to get more fine grained
        # markers are available in children of TAttMarker
if isinstance(obj, ROOT.TAttMarker):
# marker size 1 == 8 px, and never scales with canvas...
obj.SetMarkerSize(self.style.markerSizepx / 8.0)
def add_plottable(self, obj, legend_title='', markerstyle='circle', color=None, use_as_frame=None):
"""
        Add a plottable object to this figure. This function makes a
        copy of the passed object and assigns it a random name. Once
        committed, the object should not be modified by the caller any more.
Parameters
----------
obj : Hist1D, Graph, None
            A root plottable object; if None, this object will only show up in the legend
legend_title : string
Title for this plottable as shown in the legend
"""
# Make a copy if we got a plottable
if obj is not None:
p = asrootpy(obj.Clone(gen_random_name()))
else:
p = ROOT.TLegendEntry()
if isinstance(p, ROOT.TH1):
p.SetDirectory(0) # make sure that the hist is not associated with a file anymore!
self._plottables.append({'p': p,
'legend_title': legend_title,
'markerstyle': markerstyle,
'color': color,
'use_as_frame': use_as_frame,
})
def import_plottables_from_canvas(self, canvas):
"""
Import plottables from a canvas which was previously created with roofie
Parameters
----------
canvas : Canvas
A canvas which was created with roofie.
Raises
------
ValueError :
The given canvas did not have the internal format as expected from roofie canvases
"""
pad = canvas.FindObject('plot')
if pad == None: # "is None" does not work since TObject is not None, but equal to None...
raise ValueError("Cannot import canvas, since it is not in roofie format.")
try:
legend = [p for p in pad.GetListOfPrimitives() if isinstance(p, ROOT.TLegend)][0]
except IndexError:
legend_entries = []
else:
legend_entries = [e for e in legend.GetListOfPrimitives()]
# load the plottables but ignore the frame
plottables = []
for p in pad.GetListOfPrimitives():
if is_plottable(p):
if p.GetName() != "__frame":
plottables.append({'p': asrootpy(p.Clone(gen_random_name()))})
for legend_entry in legend_entries:
if p == legend_entry.GetObject():
plottables[-1]['legend_title'] = legend_entry.GetLabel()
else:
self.xtitle = p.GetXaxis().GetTitle()
self.ytitle = p.GetYaxis().GetTitle()
# set legend title if any
if legend.GetHeader():
self.legend.title = legend.GetHeader()
self._plottables += plottables
def draw_to_canvas(self):
"""
Draw this figure to a canvas, which is then returned.
"""
if len(self._plottables) == 0:
raise IndexError("No plottables defined")
c = Canvas(width=self.style.canvasWidth,
height=self.style.canvasHeight,
size_includes_decorations=True)
if self.legend.position == 'seperate':
legend_width = .2
pad_legend = Pad(1 - legend_width, 0, 1., 1., name="legend")
pad_legend.SetLeftMargin(0.0)
pad_legend.SetFillStyle(0) # make this pad transparent
pad_legend.Draw()
else:
legend_width = 0
pad_plot = Pad(0., 0., 1 - legend_width, 1., name="plot", )
pad_plot.SetMargin(*self.style.plot_margins)
pad_plot.Draw()
pad_plot.cd()
        # Work around a bug in get_limits: the whole call fails if a single plottable is invalid
xmin, xmax, ymin, ymax = None, None, None, None
for pdic in self._plottables:
try:
limits = get_limits(pdic['p'], logx=self.plot.logx, logy=self.plot.logy)
                # Beware: Python 2 silently compares None with numbers, giving arbitrary results
xmin = min([xmin, limits[0]]) if xmin is not None else limits[0]
xmax = max([xmax, limits[1]]) if xmax is not None else limits[1]
ymin = min([ymin, limits[2]]) if ymin is not None else limits[2]
ymax = max([ymax, limits[3]]) if ymax is not None else limits[3]
except (TypeError, ValueError):
# some plottables do not work with this rootpy function (eg. graph without points, tf1)
# TODO: should be fixed upstream
pass
# overwrite these ranges if defaults are given
if self.plot.xmin is not None:
xmin = self.plot.xmin
if self.plot.xmax is not None:
xmax = self.plot.xmax
if self.plot.ymax is not None:
ymax = self.plot.ymax
if self.plot.ymin is not None:
ymin = self.plot.ymin
if not all([val is not None for val in [xmin, xmax, ymin, ymax]]):
raise TypeError("unable to determine plot axes ranges from the given plottables")
colors = get_color_generator(self.plot.palette, self.plot.palette_ncolors)
# draw an empty frame within the given ranges;
frame_from_plottable = [p for p in self._plottables if p.get('use_as_frame')]
if len(frame_from_plottable) > 0:
frame = frame_from_plottable[0]['p'].Clone('__frame')
frame.Reset()
frame.SetStats(0)
frame.xaxis.SetRangeUser(xmin, xmax)
frame.yaxis.SetRangeUser(ymin, ymax)
frame.GetXaxis().SetTitle(self.xtitle)
frame.GetYaxis().SetTitle(self.ytitle)
self._theme_plottable(frame)
frame.Draw()
else:
frame = Graph()
frame.SetName("__frame")
# add a silly point in order to have root draw this frame...
frame.SetPoint(0, 0, 0)
frame.GetXaxis().SetLimits(xmin, xmax)
frame.GetYaxis().SetLimits(ymin, ymax)
frame.SetMinimum(ymin)
frame.SetMaximum(ymax)
frame.GetXaxis().SetTitle(self.xtitle)
frame.GetYaxis().SetTitle(self.ytitle)
self._theme_plottable(frame)
# Draw this frame: 'A' should draw the axis, but does not work if nothing else is drawn.
            # 'L' would draw a line between the points but it seems to do nothing if only one point is present
# P would also draw that silly point but we don't want that!
frame.Draw("AL")
xtick_length = frame.GetXaxis().GetTickLength()
ytick_length = frame.GetYaxis().GetTickLength()
for i, pdic in enumerate(self._plottables):
obj = pdic['p']
if isinstance(obj, ROOT.TLegendEntry):
_root_color = Color(pdic['color'])
_root_markerstyle = MarkerStyle(pdic['markerstyle'])
obj.SetMarkerStyle(_root_markerstyle('root'))
obj.SetMarkerColor(_root_color('root'))
elif isinstance(obj, (ROOT.TH1, ROOT.TGraph, ROOT.TF1)):
self._theme_plottable(obj)
obj.SetMarkerStyle(pdic.get('markerstyle', 'circle'))
if pdic.get('color', None):
obj.color = pdic['color']
else:
try:
color = next(colors)
except StopIteration:
log.warning("Ran out of colors; defaulting to black")
color = 1
obj.color = color
xaxis = obj.GetXaxis()
yaxis = obj.GetYaxis()
# Set the title to the given title:
obj.title = self.title
                # how the axis range is set depends on the type of the plottable
                if isinstance(obj, ROOT.TGraph):
                    # Calling SetLimits on a TH1 axis would only distort its
                    # labels, hence graphs and histograms are handled differently.
xaxis.SetLimits(xmin, xmax)
yaxis.SetLimits(ymin, ymax) # for unbinned data
# 'P' plots the current marker, 'L' would connect the dots with a simple line
# see: https://root.cern.ch/doc/master/classTGraphPainter.html for more draw options
drawoption = 'Psame'
elif isinstance(obj, ROOT.TH1):
obj.SetStats(0)
xaxis.SetRangeUser(xmin, xmax)
yaxis.SetRangeUser(ymin, ymax)
drawoption = 'same'
elif isinstance(obj, ROOT.TF1):
# xaxis.SetLimits(xmin, xmax)
# yaxis.SetLimits(ymin, ymax) # for unbinned data
drawoption = 'same'
obj.Draw(drawoption)
                # (A None plottable was converted to a TLegendEntry in add_plottable, so it only shows up in the legend.)
else:
raise TypeError("Un-plottable type given.")
pad_plot.SetTicks()
pad_plot.SetLogx(self.plot.logx)
pad_plot.SetLogy(self.plot.logy)
pad_plot.SetGridx(self.plot.gridx)
pad_plot.SetGridy(self.plot.gridy)
# do we have legend titles?
if any([pdic.get('legend_title') for pdic in self._plottables]):
leg = self._create_legend()
longest_label = 0
for pdic in self._plottables:
if not pdic.get('legend_title', False):
continue
leg.AddEntry(pdic['p'], pdic['legend_title'])
if len(pdic['legend_title']) > longest_label:
longest_label = len(pdic['legend_title'])
# Set the legend position
# vertical:
if self.legend.position.startswith('t'):
                leg_height = leg.y2 - leg.y1
                leg.y2 = 1 - pad_plot.GetTopMargin() - ytick_length
                leg.y1 = leg.y2 - leg_height
            elif self.legend.position.startswith('b'):
                leg_height = leg.y2 - leg.y1
                leg.y1 = pad_plot.GetBottomMargin() + ytick_length
                leg.y2 = leg.y1 + leg_height
# horizontal:
if self.legend.position[1:].startswith('l'):
leg_width = 0.3
leg.x1 = pad_plot.GetLeftMargin() + xtick_length
leg.x2 = leg.x1 + leg_width
elif self.legend.position[1:].startswith('r'):
leg_width = 0.3
leg.x2 = 1 - pad_plot.GetRightMargin() - xtick_length
leg.x1 = leg.x2 - leg_width
if self.legend.position == 'seperate':
with pad_legend:
leg.Draw()
else:
leg.Draw()
if self.plot.logx:
pad_plot.SetLogx(True)
if self.plot.logy:
pad_plot.SetLogy(True)
pad_plot.Update() # needed sometimes with import of canvas. maybe because other "plot" pads exist...
return c
def delete_plottables(self):
"""
Delete all plottables in this figure so that it can be filled with
        new ones while keeping the labels.
"""
self._plottables = []
def save_to_root_file(self, in_f, name, path=''):
"""
Save the current figure to the given root file under the given path
Parameters
----------
        in_f : TFile
Root file object open in writable mode
name : str
Name for the canvas in the root file
path : str
The path where the figure should be saved within the root file
Returns
-------
TFile :
The file where the object was written to
"""
f = asrootpy(in_f)
c = self.draw_to_canvas()
c.name = name
try:
f.mkdir(path, recurse=True)
except ValueError:
pass
f.cd(path)
success = c.Write()
if success == 0:
raise ValueError("Could not write to file!")
return f
def save_to_file(self, path, name):
"""
        Save the current figure as a PDF file with the given name under the given path
Parameters
----------
name : string
Name of the file including its extension
path : string
Path excluding the file name, relative files are interpreted relative to the working dir
"""
# check if the name has the right extension
if len(name.split('.')) != 2:
raise ValueError("Filename must be given with extension")
if name.split('.')[1] != 'pdf':
raise NotImplementedError("Only PDF export is implemented at the moment")
        # strip off a trailing / if any
        # (note: this is not compatible with Windows paths)
path = path.rstrip('/')
try:
os.makedirs(path)
except OSError:
pass
        # The order of the following is important! First set the paper size,
        # then draw the canvas, and only then create the pdf; calling
        # pdf.Range(10, 10) on its own is not sufficient and produces
        # inconsistent output.
        # Ideally the global gStyle would be restored when we are finished,
        # but ROOT does not handle resetting the paper size gracefully either
        # (see the commented-out lines below).
# paper_width, paper_height = ROOT.Double(), ROOT.Double()
# ROOT.gStyle.GetPaperSize(paper_width, paper_height)
ROOT.gStyle.SetPaperSize(self.style.canvasWidth / self.style.pt_per_cm,
self.style.canvasHeight / self.style.pt_per_cm,)
c = self.draw_to_canvas()
c.Print("{0}/{1}".format(path, name))
# reset the page size
# ROOT.gStyle.SetPaperSize(paper_width, paper_height)
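
# --- Usage sketch (illustration added for clarity; not part of the original module) ---
# A minimal example of the intended workflow. It assumes a working ROOT/rootpy
# installation and a writable output directory; the histogram, titles and file
# name below are made up.
def _example_figure_usage(output_dir='/tmp'):
    h = ROOT.TH1F(gen_random_name(), 'demo hist', 10, 0.0, 1.0)
    h.FillRandom('gaus', 1000)
    fig = Figure()
    fig.xtitle = 'x'
    fig.ytitle = 'entries'
    fig.legend.position = 'tr'
    fig.add_plottable(h, legend_title='toy data')
    fig.save_to_file(output_dir, 'example.pdf')  # draws to a canvas internally
    return fig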
| AudreyFrancisco/AliPhysics | PWGMM/MC/aligenqa/aligenqa/roofie/figure.py | Python | bsd-3-clause | 20,679 |
import joint
class Limb:
_name = "limb"
    def __init__(self, name):
        self._name = name
        # per-instance dict: a class-level `joints = {}` would be shared by
        # every Limb instance, so joints added to one limb would appear in all
        self.joints = {}
def add_joint(self, joint):
self.joints[joint.name] = joint
def shutdown(self):
for j in self.joints:
            self.joints[j].shutdown()
| mhespenh/eva | limb.py | Python | gpl-2.0 | 283 |
# -*- coding: utf-8 -*-
# Tests to prevent against recurrences of earlier bugs.
regression_tests = r"""
It should be possible to re-use attribute dictionaries (#3810)
>>> from django.newforms import *
>>> extra_attrs = {'class': 'special'}
>>> class TestForm(Form):
... f1 = CharField(max_length=10, widget=TextInput(attrs=extra_attrs))
... f2 = CharField(widget=TextInput(attrs=extra_attrs))
>>> TestForm(auto_id=False).as_p()
u'<p>F1: <input type="text" class="special" name="f1" maxlength="10" /></p>\n<p>F2: <input type="text" class="special" name="f2" /></p>'
#######################
# Tests for form i18n #
#######################
There were some problems with form translations in #3600
>>> from django.utils.translation import gettext_lazy, activate, deactivate
>>> class SomeForm(Form):
... username = CharField(max_length=10, label=gettext_lazy('Username'))
>>> f = SomeForm()
>>> print f.as_p()
<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>
>>> activate('de')
>>> print f.as_p()
<p><label for="id_username">Benutzername:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>
>>> deactivate()
Unicode decoding problems...
>>> GENDERS = (('0', u'En tied\xe4'), ('1', u'Mies'), ('2', u'Nainen'))
>>> class SomeForm(Form):
... somechoice = ChoiceField(choices=GENDERS, widget=RadioSelect())
>>> f = SomeForm()
>>> f.as_p()
u'<p><label for="id_somechoice_0">Somechoice:</label> <ul>\n<li><label><input type="radio" id="id_somechoice_0" value="0" name="somechoice" /> En tied\xe4</label></li>\n<li><label><input type="radio" id="id_somechoice_1" value="1" name="somechoice" /> Mies</label></li>\n<li><label><input type="radio" id="id_somechoice_2" value="2" name="somechoice" /> Nainen</label></li>\n</ul></p>'
#######################
# Miscellaneous Tests #
#######################
There once was a problem with Form fields called "data". Let's make sure that
doesn't come back.
>>> class DataForm(Form):
... data = CharField(max_length=10)
>>> f = DataForm({'data': 'xyzzy'})
>>> f.is_valid()
True
>>> f.cleaned_data
{'data': u'xyzzy'}
"""
| jonaustin/advisoryscan | django/tests/regressiontests/forms/regressions.py | Python | mit | 2,175 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This is derived from a cadquery script for generating QFP/GullWings models in X3D format.
#
# from https://bitbucket.org/hyOzd/freecad-macros
# author hyOzd
#
## file of parametric definitions
import collections
from collections import namedtuple
import FreeCAD, Draft, FreeCADGui
import ImportGui
import FreeCADGui as Gui
import shaderColors
import exportPartToVRML as expVRML
## base parameters & model
import collections
from collections import namedtuple
# Import cad_tools
import cq_cad_tools
# Reload tools
from cq_cad_tools import reload_lib
reload_lib(cq_cad_tools)
# Explicitly load all needed functions
from cq_cad_tools import FuseObjs_wColors, GetListOfObjects, restore_Main_Tools, \
exportSTEP, close_CQ_Example, exportVRML, saveFCdoc, z_RotateObject, Color_Objects, \
CutObjs_wColors, checkRequirements
# Sphinx workaround #1
try:
QtGui
except NameError:
QtGui = None
#
try:
# Gui.SendMsgToActiveView("Run")
# from Gui.Command import *
Gui.activateWorkbench("CadQueryWorkbench")
import cadquery
cq = cadquery
from Helpers import show
# CadQuery Gui
except: # catch *all* exceptions
msg = "missing CadQuery 0.3.0 or later Module!\r\n\r\n"
msg += "https://github.com/jmwright/cadquery-freecad-module/wiki\n"
if QtGui is not None:
reply = QtGui.QMessageBox.information(None,"Info ...",msg)
# maui end
#checking requirements
checkRequirements(cq)
| easyw/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/Buzzer_Beeper/cq_parameters.py | Python | gpl-2.0 | 1,472 |
from sympy import expand, simplify
from galgebra.printer import Format, xpdf
from galgebra.ga import Ga
g = '1 # #,' + \
'# 1 #,' + \
'# # 1'
Format()
ng3d = Ga('e1 e2 e3', g=g)
(e1, e2, e3) = ng3d.mv()
print('g_{ij} =', ng3d.g)
E = e1 ^ e2 ^ e3
Esq = (E * E).scalar()
print('E =', E)
print('%E^{2} =', Esq)
Esq_inv = 1 / Esq
E1 = (e2 ^ e3) * E
E2 = (-1) * (e1 ^ e3) * E
E3 = (e1 ^ e2) * E
print('E1 = (e2^e3)*E =', E1)
print('E2 =-(e1^e3)*E =', E2)
print('E3 = (e1^e2)*E =', E3)
w = (E1 | e2)
w = w.expand()
print('E1|e2 =', w)
w = (E1 | e3)
w = w.expand()
print('E1|e3 =', w)
w = (E2 | e1)
w = w.expand()
print('E2|e1 =', w)
w = (E2 | e3)
w = w.expand()
print('E2|e3 =', w)
w = (E3 | e1)
w = w.expand()
print('E3|e1 =', w)
w = (E3 | e2)
w = w.expand()
print('E3|e2 =', w)
w = (E1 | e1)
w = (w.expand()).scalar()
Esq = expand(Esq)
print('%(E1\\cdot e1)/E^{2} =', simplify(w / Esq))
w = (E2 | e2)
w = (w.expand()).scalar()
print('%(E2\\cdot e2)/E^{2} =', simplify(w / Esq))
w = (E3 | e3)
w = (w.expand()).scalar()
print('%(E3\\cdot e3)/E^{2} =', simplify(w / Esq))
xpdf(paper='letter', prog=True)
| arsenovic/galgebra | doc/python/ReciprocalBasis.py | Python | bsd-3-clause | 1,108 |
# -*- coding: utf-8 -*-
from flask import render_template, redirect
from ffsjp import app, db, models
from ffsjp.base import Base
import copy
base = Base()
@app.route('/')
def index():
base_local = copy.deepcopy(base)
base_local.setPage(models.Pages.query.filter_by(model='index').first())
return render_template('index.html', base = base_local)
@app.route('/<page_name>/')
def page(page_name):
pages_l = ['services', 'games', 'index']
pages_redirect = ['materials']
base_local = copy.deepcopy(base)
if page_name in pages_l:
base_local.setPage(models.Pages.query.filter_by(model=page_name).first())
return render_template(page_name + '.html', base = base_local)
elif page_name in pages_redirect:
return redirect(app.config['HOME_PATH'] + page_name + '/list/')
else:
base_local.setTitle('404 Page not found')
return render_template('errors/404.html', base = base_local), 404
@app.route('/materials/list/')
@app.route('/materials/list/<cat_name>/')
def materials(cat_name = ''):
base_local = copy.deepcopy(base)
base_local.setPage(models.Pages.query.filter_by(model='materials').first())
category = models.Cat.query.order_by(models.Cat.ord).all()
this_cat = ''
if cat_name:
data = models.Materials.query.filter_by(category = cat_name).order_by(models.Materials.ord).all()
if data:
base_local.setData(data)
this_cat = models.Cat.query.filter_by(name = cat_name).first()
else:
return redirect(app.config['HOME_PATH'] + 'materials/list/')
else:
base_local.setData(models.Materials.query.order_by(models.Materials.ord).all())
return render_template('materials.html', base = base_local, category = category, this_cat = this_cat)
@app.errorhandler(404)
def page_not_found(error):
base_local = copy.deepcopy(base)
base_local.setTitle('404 Not found')
return render_template('errors/404.html', base = base_local), 404
| ffsjp/ffsjp | ffsjp/views.py | Python | mit | 1,999 |
from __future__ import division
from pylab import *
import random
# there already is a sample in the namespace from numpy
from random import sample as smpl
import itertools
import utils
utils.backup(__file__)
import synapses
class AbstractSource(object):
def __init__(self):
"""
Initialize all relevant variables.
"""
raise NotImplementedError
def next(self):
"""
Returns the next input
"""
raise NotImplementedError
def global_range(self):
"""
Returns the maximal global index of all inputs
"""
raise NotImplementedError
def global_index(self):
"""
Returns the current global (unique) index of the current input
"character"
"""
raise NotImplementedError
def generate_connection_e(self,N_e):
"""
Generates connection matrix W_eu from input to the excitatory
population
Parameters:
N_e: int
Number of excitatory units
"""
raise NotImplementedError
def generate_connection_i(self,N_i):
"""
Generates connection matrix W_iu from input to the inhibitory
population
Parameters:
N_i: int
Number of inhibitory units
"""
raise NotImplementedError
def update_W_eu(self,W_eu):
"""
Modifies W_eu if necessary
Called every step in SORN after next() was called but before
W_eu is used
"""
pass
class CountingSource(AbstractSource):
"""
Source for the counting task.
    Different words are presented with individual transition probabilities.
"""
def __init__(self, words,probs, N_u_e, N_u_i, avoid=False,
permute_ambiguous=False):
"""
Initializes variables.
Parameters:
words: list
The words to present
probs: matrix
The probabilities of transitioning between word i and j
It is assumed that they are summing to 1
N_u: int
Number of active units per step
avoid: bool
Avoid same excitatory units for different words
"""
self.word_index = 0 #Index for word
self.ind = 0 #Index within word
self.words = words #different words
self.probs = probs #Probability of transitioning from i to j
self.N_u_e = int(N_u_e) #Number active per step
self.N_u_i = int(N_u_i)
self.avoid = avoid
self.permute_ambiguous = permute_ambiguous
self.alphabet = unique("".join(words))
self.N_a = len(self.alphabet)
self.lookup = dict(zip(self.alphabet,range(self.N_a)))
self.glob_ind = [0]
self.glob_ind.extend(cumsum(map(len,words)))
self.predict = self.predictability()
self.reset()
@classmethod
def init_simple(cls, N_words, N_letters, word_length, max_fold_prob,
N_u_e, N_u_i, avoiding, words=None, seed=None):
"""
Construct the arguments for the source to make it usable for the
cluster
Parameters:
N_words: int
Number of different words
N_letters: int
Number of letters to generate words from
word_length: list
Range of length (unimodal distribution)
max_fold_prob: float
maximal probability difference between words
N_u_e: int
Number of active excitatory units per step
N_u_i: int
Number of active inhibitory units per step
            avoiding: bool
Avoid same excitatory units for different words
"""
newseed = np.random.randint(0,4000000000)
if seed is not None:
np.random.seed(seed=seed)
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
assert(N_letters <= len(letters))
letters = array([x for x in letters[:N_letters]])
if words is None:
words = []
for i in range(N_words):
word = letters[np.random.randint(0,N_letters,
np.random.randint(
word_length[0], word_length[1]+1))]
words.append(''.join(word))
else:
assert(N_words == len(words) and
N_letters == len(unique(''.join(words))))
probs = array([np.random.rand(N_words)*(max_fold_prob-1)+1]
*N_words)
# Normalize
probs /= sum(probs,1)
if seed is not None:
np.random.seed(seed=newseed)
return CountingSource(words, probs, N_u_e, N_u_i,avoid=avoiding)
def generate_connection_e(self,N_e):
W = zeros((N_e,self.N_a))
available = set(range(N_e))
for a in range(self.N_a):
temp = random.sample(available,self.N_u_e)
W[temp,a] = 1
if self.avoid:
available = available.difference(temp)
# The underscore has the special property that it doesn't
# activate anything:
if '_' in self.lookup:
W[:,self.lookup['_']] = 0
c = utils.Bunch(use_sparse=False,
lamb=np.inf,
avoid_self_connections=False)
ans = synapses.create_matrix((N_e,self.N_a),c)
ans.W = W
return ans
def generate_connection_i(self,N_i):
c = utils.Bunch(use_sparse=False,
lamb=np.inf,
avoid_self_connections=False)
ans = synapses.create_matrix((N_i,self.N_a),c)
W = zeros((N_i, self.N_a))
if N_i>0:
available = set(range(N_i))
for a in range(self.N_a):
temp = random.sample(available,self.N_u_i)
W[temp,a] = 1
#~ if self.avoid: # N_i is smaller -> broad inhibition?
#~ available = available.difference(temp)
if '_' in self.lookup:
W[:,self.lookup['_']] = 0
ans.W = W
return ans
def char(self):
word = self.words[self.word_index]
return word[self.ind]
def index(self):
character = self.char()
ind = self.lookup[character]
return ind
def next_word(self):
self.ind = 0
w = self.word_index
p = self.probs[w,:]
self.word_index = find(rand()<=cumsum(p))[0]
def next(self):
self.ind = self.ind+1
string = self.words[self.word_index]
if self.ind >= len(string):
self.next_word()
ans = zeros(self.N_a)
ans[self.index()] = 1
return ans
def reset(self):
self.next_word()
self.ind = -1
def global_index(self):
return self.glob_ind[self.word_index]+self.ind
def global_range(self):
return self.glob_ind[-1]
def trial_finished(self):
return self.ind+1 >= len(self.words[self.word_index])
def update_W_eu(self,W_eu):
if not self.permute_ambiguous:
return
# Init
if not hasattr(self,'A_neurons'):
self.A_neurons = where(W_eu.W[:,self.lookup['A']]==1)[0]
self.B_neurons = where(W_eu.W[:,self.lookup['B']]==1)[0]
self.N_u = int(len(self.A_neurons))
assert(self.N_u == len(self.B_neurons))
self.N_A = {}
self.N_B = {}
for letter in [word[0] for word in self.words]:
letter_index = self.lookup[letter]
self.N_A[letter] = sum(W_eu.W[self.A_neurons,letter_index]).round().astype(int)
self.N_B[letter] = sum(W_eu.W[self.B_neurons,letter_index]).round().astype(int)
# When new word presented, permute its input
# careful! simple permutation goes crazy when inputs overlap
if self.ind == 0:
letter = self.char()
if letter in ['A','B']:
return
letter_index = self.index()
# First set both to zero so that the second doesn't set
# units of the first back to zero (overlap case)
W_eu.W[self.A_neurons,letter_index] *= 0
W_eu.W[self.B_neurons,letter_index] *= 0
W_eu.W[self.A_neurons[smpl(xrange(self.N_u),self.N_A[letter])],
letter_index] = 1
W_eu.W[self.B_neurons[smpl(xrange(self.N_u),self.N_B[letter])],
letter_index] = 1
def predictability(self):
temp = self.probs
for n in range(10):
temp = temp.dot(temp)
final = temp[0,:]
#Let's assume that all words have unique initial letters
probs = map(len, self.words)
probs = array(probs)
probs = (probs + self.probs.max(1)-1)/probs
return sum(final*probs)
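
# Illustration only: a minimal sketch of setting up a CountingSource with two
# made-up words and uniform transition probabilities; the numbers of active
# input units (10 excitatory, 2 inhibitory) are arbitrary choices.
def _example_counting_source():
    words = ['ABB', 'BAA']
    probs = array([[0.5, 0.5],
                   [0.5, 0.5]])  # probs[i, j]: probability that word j follows word i
    source = CountingSource(words, probs, N_u_e=10, N_u_i=2, avoid=True)
    one_hot = source.next()  # one-hot vector over the alphabet {'A', 'B'}
    return source, one_hot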
class RandomLetterSource(CountingSource):
def __init__(self, N_letters, N_u_e, N_u_i, avoid=False):
letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
assert(N_letters <= len(letters))
words = [x for x in letters[:N_letters]]
probs = ones((N_letters,N_letters))
probs /= sum(probs,1)
super(RandomLetterSource, self).__init__(words, probs, N_u_e,
N_u_i,avoid=avoid)
class TrialSource(AbstractSource):
"""
This source takes any other source and gives it a trial-like
structure with blank periods inbetween stimulation periods
The source has to implement a trial_finished method that is True
if it is at the end of one trial
"""
def __init__(self,source,blank_min_length,blank_var_length,
defaultstim,resetter=None):
assert(hasattr(source,'trial_finished'))
self.source = source
self.blank_min_length = blank_min_length
self.blank_var_length = blank_var_length
self.reset_blank_length()
self.defaultstim = defaultstim
self.resetter = resetter
self._reset_source()
self.blank_step = 0
def reset_blank_length(self):
if self.blank_var_length > 0:
self.blank_length = self.blank_min_length\
+ randint(self.blank_var_length)
else:
self.blank_length = self.blank_min_length
def next(self):
if not self.source.trial_finished():
return self.source.next()
else:
if self.blank_step >= self.blank_length:
self.blank_step = 0
self._reset_source()
self.reset_blank_length()
return self.source.next()
else:
self.blank_step += 1
return self.defaultstim
def _reset_source(self):
if self.resetter is not None:
getattr(self.source,self.resetter)()
def global_range(self):
        return self.source.global_range()
def global_index(self):
if self.blank_step > 0:
return -1
return self.source.global_index()
def generate_connection_e(self, N_e):
return self.source.generate_connection_e(N_e)
def generate_connection_i(self, N_i):
return self.source.generate_connection_i(N_i)
def update_W_eu(self,W_eu):
self.source.update_W_eu(W_eu)
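
# Illustration only: a sketch of wrapping a source in a TrialSource so that a
# short all-zero "blank" stimulus separates consecutive trials. The parameter
# values are arbitrary; the wrapped source must provide trial_finished() and,
# here, a reset() method (CountingSource does).
def _example_trial_source(inner_source):
    defaultstim = zeros(inner_source.N_a)  # shown during the blank period
    return TrialSource(inner_source, blank_min_length=5, blank_var_length=5,
                       defaultstim=defaultstim, resetter='reset')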
class AndreeaCountingSource(AbstractSource):
"""
This was only for debugging purposes - it resembles her matlab code
perfectly
"""
def __init__(self,sequence,sequence_U,pop,train):
self.pop = pop-1
self.seq = sequence[0]-1
self.seq_u = sequence_U[0]-1
self.t = -1
# change m,n,x to make them identical
if train:
self.seq[self.seq==2] = 80
self.seq[self.seq==3] = 2
self.seq[self.seq==4] = 3
self.seq[self.seq==80] = 4
self.seq_u[self.seq_u>13] = (self.seq_u[self.seq_u>13]%7)+7
self.lookup = {'A':0,'B':1,'M':2,'N':3,'X':4}
else:
self.seq[self.seq==2] = 7
self.seq[self.seq==9] = 2
self.seq[self.seq==3] = 5
self.seq[self.seq==10] = 3
self.seq[self.seq==4] = 6
self.seq[self.seq==11] = 4
self.lookup = {'A':0,'B':1,'X':7,'M':5,'N':6,'C':2,'D':3,
'E':4}
#~ self.lookup = {'A':0,'B':1,'X':2,'M':3,'N':4,'C':9,
#~ 'D':10,'E':11}
self.alphabet = 'ABCDEMNX'
self.words = ['AXXXXXM','BXXXXXN','CXXXXXN','CXXXXXM',
'DXXXXXN','DXXXXXM','EXXXXXN','EXXXXXM']
self.glob_ind = [0]
self.glob_ind.extend(cumsum(map(len,self.words)))
self.N_a = self.seq.max()+1
def next(self):
self.t += 1
tmp = zeros((self.N_a))
tmp[self.seq[self.t]] = 1
return tmp
def global_range(self):
return self.seq_u.max()
def global_index(self):
return self.seq_u[self.t]
def generate_connection(self,N_e):
W = np.zeros((N_e,self.N_a))
for i in range(self.N_a):
if i <= 4: #--> A,B,X,M,N
W[self.pop[:,i],i] = 1
if i == 9:
W[self.pop[0:2,0],i] = 1
W[self.pop[2:10,1],i] = 1
if i == 10:
W[self.pop[0:5,0],i] = 1
W[self.pop[5:10,1],i] = 1
if i == 11:
W[self.pop[0:8,0],i] = 1
W[self.pop[8:10,1],i] = 1
self.W = W
return W
class NoSource(AbstractSource):
"""
No input for the spontaneous conditions
Parameters:
N_u: int
Number of input units
"""
def __init__(self,N_u=1):
self.N_u = N_u
def next(self):
return np.zeros((self.N_u))
def global_range(self):
return 1
def global_index(self):
return -1
def generate_connection_e(self,N_e):
c = utils.Bunch(use_sparse=False,
lamb=np.inf, # cannot be 0
avoid_self_connections=False)
tmpsyn = synapses.create_matrix((N_e,self.N_u),c)
tmpsyn.set_synapses(tmpsyn.get_synapses()*0)
return tmpsyn
def generate_connection_i(self,N_i):
c = utils.Bunch(use_sparse=False,
lamb=np.inf, # cannot be 0
avoid_self_connections=False)
tmpsyn = synapses.create_matrix((N_i,self.N_u),c)
tmpsyn.set_synapses(tmpsyn.get_synapses()*0)
return tmpsyn
class RandomSource(AbstractSource):
"""
Poisson input spike trains.
"""
def __init__(self, firing_rate, N_neurons, connection_density,
eta_stdp):
"""
Initialize the source
Parameters:
firing_rate: double
The firing rate of all input neurons
N_neurons: int
The number of poisson input units
connection_density: double
Density of connections from input to excitatory pop
eta_stdp: double
STDP rate for the W_eu matrix
"""
self.rate = firing_rate
self.N = N_neurons
self.density = connection_density
self.eta_stdp = eta_stdp
def next(self):
return rand(self.N)<=self.rate
def global_range(self):
return 1
def global_index(self):
return 0
def generate_connection(self,N_e):
c = utils.Bunch(use_sparse=False,
lamb=self.density*N_e,
avoid_self_connections=False,
#CHANGE should this be different?
eta_stdp = self.eta_stdp)
tmp = synapses.create_matrix((N_e,self.N),c)
# get correct connection density
noone = True
while(noone):
tmp.set_synapses((rand(N_e,self.N)<self.density).astype(
float))
if sum(tmp.get_synapses()) > 0:
noone = False
return tmp
#Useful for turning an index into a direction
mapping = {0:(+1,+0),
1:(-1,+0),
2:(+0,+1),
3:(+0,-1),
4:(+1,+1),
5:(+1,-1),
6:(-1,+1),
7:(-1,-1)}
class DriftingSource(AbstractSource):
"""
One of Philip's sources. Drifting objects in different directions
"""
def __init__(self,c):
self.X = c.X #num horizontal pixels
self.Y = c.Y #num vertical pixels
#number of directions of possible movement
self.num_direction = c.num_direction
self.num_steps = c.num_steps
self.choose_each_step = c.choose_each_step
self.symbol = c.symbol # Number of bits for each symbol
#~ self.N = c.N #number of neurons
self.change_prob = c.change_prob #probability of changing dir
self.direction = random.randrange(0,self.num_direction)
self.all = list(itertools.product(range(c.X),range(c.Y)))
self.active = {}
def get_new_symbols(self,next):
everything = set(self.all)
active = set(next)
remain = list(everything.difference(active))
random.shuffle(remain)
activated = remain[:self.choose_each_step]
return activated
def next(self):
if random.random() < self.change_prob:
#random.randint has different endpoint behaviour from
# np.random.randint!!
self.direction = random.randrange(0,self.num_direction)
temp = {}
(dx,dy) = mapping[self.direction]
for (x,y) in self.active.keys():
count = self.active[(x,y)] - 1
if count <= 0:
continue
nx = (x+dx)%self.X
ny = (y+dy)%self.Y
temp[(nx,ny)] = count
activated = self.get_new_symbols(temp.keys())
for k in activated:
temp[k] = self.num_steps
self.active = temp
# Now convert that into representation
ans = np.zeros((self.X,self.Y))
for (x,y) in self.active.keys():
ans[x,y] = 1
ans.shape = self.X*self.Y
return ans
def generate_connection(self,N):
W = np.zeros((N,self.X,self.Y))
available = set(range(N))
for a in range(self.X):
for b in range(self.Y):
temp = random.sample(available,self.symbol)
W[temp,a,b] = 1
available = available.difference(temp)
W.shape = (N,self.X*self.Y)
c = utils.Bunch(use_sparse=False,
lamb=np.inf,
avoid_self_connections=False)
ans = synapses.create_matrix((N,self.X*self.Y),c)
ans.W = W
return ans
def global_range(self):
return self.num_direction
def global_index(self):
return self.direction
| Saran-nns/SORN | common/sources.py | Python | mit | 19,478 |
# -*- coding: utf-8 -*-
# Standard library imports
import re
from collections import namedtuple
from functools import total_ordering
# Local imports
from . import compat
__all__ = [
'ParseError',
'Version',
'parse_version',
'default_version',
]
# Nuke-style version pattern (e.g. 11.2v3)
nuke_version_pattern = (
r'(?P<major>\d+)'
r'\.(?P<minor>\d+)'
r'(?:v)'
r'(?P<patch>\d+)$'
)
# Version pattern with 4 digits
four_version_pattern = (
r'(?:v)?'
r'(?P<major>\d+)'
r'\.(?P<minor>\d+)'
r'\.(?P<revision>\d+)'
r'\.(?P<build>\d+)'
r'(?:-(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'
)
# Modified regex from semver.org - works with calver as well
semver_version_pattern = (
r'(?:v)?'
r'(?P<major>\d+)'
r'\.(?P<minor>\d+)'
r'\.(?P<patch>\d+)'
r'(?:-(?P<prerelease>(?:\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)'
r'(?:\.(?:\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?'
r'(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$'
)
simplever_pattern = r'(?:v)?(?P<version>(\d+\.?)+)$'
VersionBase = namedtuple(
'Version',
['major', 'minor', 'patch', 'prerelease', 'buildmetadata', 'string']
)
@total_ordering
class Version(VersionBase):
_defaults = {
'major': 0,
'minor': 0,
'patch': 0,
'prerelease': None,
'buildmetadata': None
}
def __str__(self):
return self.string
def __hash__(self):
return super(Version, self).__hash__()
def _comparable_value(self, value, other):
'''Return a value that will be comparable with other.'''
types_match = type(value) is type(other)
if types_match or isinstance(value, compat.numeric_types):
return value
if other is None:
return value
if value is None and isinstance(other, compat.string_types):
return 'zzzzzzzz'
if isinstance(other, compat.numeric_types):
return -float('inf')
return value
def _comparable(self, other):
'''Generate a comparison key for a Version object.
Coerces types of prerelease and buildmeta to ensure that the Version
objects are comparable. This is required because Python 3 now raises
a TypeError when attempting to compare str and int.
'''
return (
self.major,
self.minor,
self.patch,
self._comparable_value(self.prerelease, other.prerelease),
self._comparable_value(self.buildmetadata, other.buildmetadata),
)
def __lt__(self, other):
if not isinstance(other, Version):
raise ValueError('Can only compare two Version objects.')
return self._comparable(other) < other._comparable(self)
def __eq__(self, other):
if not isinstance(other, Version):
raise ValueError('Can only compare two Version objects.')
return tuple(other) == tuple(self)
class ParseError(Exception):
'''Raised when a parse method fails.'''
def parse_version(string):
'''Parse and return a Version from the provided string.
Supports:
- semver / calver
- simple versions like: 10, v2, 1.0, 2.2.4
Arguments:
string (str): String to parse version from.
Returns:
Version object
Raises:
ParseError when a version can not be parsed.
'''
# Parse weird nuke versioning
match = re.search(nuke_version_pattern, string)
if match:
return Version(
major=int(match.group('major')),
minor=int(match.group('minor')),
patch=int(match.group('patch')),
prerelease=None,
buildmetadata=None,
string=match.group(0)
)
# Parse four_version - four digit version
match = re.search(four_version_pattern, string)
if match:
return Version(
major=int(match.group('major')),
minor=int(match.group('minor')),
patch=int(match.group('revision')),
prerelease=int(match.group('build')),
buildmetadata=match.group('buildmetadata'),
string=match.group(0),
)
# Parse Semver / Calver
match = re.search(semver_version_pattern, string)
if match:
return Version(
major=int(match.group('major')),
minor=int(match.group('minor')),
patch=int(match.group('patch')),
prerelease=match.group('prerelease'),
buildmetadata=match.group('buildmetadata'),
string=match.group(0),
)
# Parse Simple version
match = re.search(simplever_pattern, string)
if match:
kwargs = dict(Version._defaults)
kwargs['string'] = match.group(0)
version_parts = match.group('version').split('.')
for part, part_name in zip(version_parts, ['major', 'minor', 'patch']):
kwargs[part_name] = int(part)
return Version(**kwargs)
raise ParseError('Could not parse version from %s' % string)
def default_version():
return Version(
major=0,
minor=1,
patch=0,
prerelease=None,
buildmetadata=None,
string='0.1.0',
)
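
# Illustration only: a few made-up strings exercising the patterns above.
def _example_parse_version():
    semver = parse_version('v1.2.3-alpha.1+build5')   # semver with prerelease/build
    nuke = parse_version('11.2v3')                    # Nuke-style x.yvz
    simple = parse_version('10')                      # bare major version
    assert (semver.major, semver.minor, semver.patch) == (1, 2, 3)
    assert semver.prerelease == 'alpha.1' and semver.buildmetadata == 'build5'
    assert (nuke.major, nuke.minor, nuke.patch) == (11, 2, 3)
    assert simple.major == 10 and simple < nuke
    return semver, nuke, simple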
| cpenv/autocpenv | packages/cpenv/versions.py | Python | mit | 5,219 |
arrBad = [
'2g1c',
'2 girls 1 cup',
'acrotomophilia',
'anal',
'anilingus',
'anus',
'arsehole',
'ass',
'asses',
'asshole',
'assmunch',
'auto erotic',
'autoerotic',
'babeland',
'baby batter',
'ball gag',
'ball gravy',
'ball kicking',
'ball licking',
'ball sack',
'ball sucking',
'bangbros',
'bareback',
'barely legal',
'barenaked',
'bastardo',
'bastinado',
'bbw',
'bdsm',
'beaver cleaver',
'beaver lips',
'bestiality',
'bi curious',
'big black',
'big breasts',
'big knockers',
'big tits',
'bimbos',
'birdlock',
'bitch',
'black cock',
'blonde action',
'blonde on blonde action',
'blow j',
'blow your l',
'blue waffle',
'blumpkin',
'bollocks',
'bondage',
'boner',
'boob',
'boobs',
'booty call',
'brown showers',
'brunette action',
'bukkake',
'bulldyke',
'bullet vibe',
'bung hole',
'bunghole',
'busty',
'butt',
'buttcheeks',
'butthole',
'camel toe',
'camgirl',
'camslut',
'camwhore',
'carpet muncher',
'carpetmuncher',
'chocolate rosebuds',
'circlejerk',
'cleveland steamer',
'clit',
'clitoris',
'clover clamps',
'clusterfuck',
'cock',
'cocks',
'coprolagnia',
'coprophilia',
'cornhole',
'cum',
'cumming',
'cunnilingus',
'cunt',
'darkie',
'date rape',
'daterape',
'deep throat',
'deepthroat',
'dick',
'dildo',
'dirty pillows',
'dirty sanchez',
'dog style',
'doggie style',
'doggiestyle',
'doggy style',
'doggystyle',
'dolcett',
'domination',
'dominatrix',
'dommes',
'donkey punch',
'double dong',
'double penetration',
'dp action',
'eat my ass',
'ecchi',
'ejaculation',
'erotic',
'erotism',
'escort',
'ethical slut',
'eunuch',
'faggot',
'fecal',
'felch',
'fellatio',
'feltch',
'female squirting',
'femdom',
'figging',
'fingering',
'fisting',
'foot fetish',
'footjob',
'frotting',
'fuck',
'fucking',
'fuck buttons',
'fudge packer',
'fudgepacker',
'futanari',
'g-spot',
'gag',
'gang bang',
'gay sex',
'genitals',
'giant cock',
'girl on',
'girl on top',
'girls gone wild',
'goatcx',
'goatse',
'gokkun',
'golden shower',
'goo girl',
'goodpoop',
'goregasm',
'grope',
'group sex',
'guro',
'hand job',
'handjob',
'hard core',
'hardcore',
'hentai',
'homoerotic',
'honkey',
'hooker',
'hot chick',
'how to kill',
'how to murder',
'huge fat',
'humping',
'incest',
'intercourse',
'jack off',
'jail bait',
'jailbait',
'jerk off',
'jigaboo',
'jiggaboo',
'jiggerboo',
'jizz',
'juggs',
'kike',
'kinbaku',
'kinkster',
'kinky',
'knobbing',
'leather restraint',
'leather straight jacket',
'lemon party',
'lolita',
'lovemaking',
'make me come',
'male squirting',
'masturbate',
'menage a trois',
'milf',
'missionary position',
'motherfucker',
'mound of venus',
'mr hands',
'muff diver',
'muffdiving',
'nambla',
'nawashi',
'negro',
'neonazi',
'nig nog',
'nigga',
'nigger',
'nimphomania',
'nipple',
'nipples',
'nsfw images',
'nude',
'nuder',
'nudity',
'nympho',
'nymphomania',
'octopussy',
'omorashi',
'one cup two girls',
'one guy one jar',
'orgasm',
'orgy',
'paedophile',
'panties',
'panty',
'pedobear',
'pedophile',
'pegging',
'penis',
'phone sex',
'piece of shit',
'piss pig',
'pissing',
'pisspig',
'playboy',
'pleasure chest',
'pole smoker',
'ponyplay',
'poof',
'poop chute',
'poopchute',
'porn',
'porno',
'pornography',
'prince albert piercing',
'pthc',
'pubes',
'pussy',
'queaf',
'raghead',
'raging boner',
'rape',
'raping',
'rapist',
'rectum',
'reverse cowgirl',
'rimjob',
'rimming',
'rosy palm',
'rosy palm and her 5 sisters',
'rusty trombone',
's&m',
'sadism',
'scat',
'schlong',
'scissoring',
'semen',
'sex',
'sexo',
'sexy',
'shaved beaver',
'shaved pussy',
'shemale',
'shibari',
'shit',
'shota',
'shrimping',
'slanteye',
'slut',
'smut',
'snatch',
'snowballing',
'sodomize',
'sodomy',
'spic',
'spooge',
'spread legs',
'strap on',
'strapon',
'strappado',
'strip club',
'style doggy',
'suck',
'sucks',
'suicide girls',
'sultry women',
'swastika',
'swinger',
'tainted love',
'taste my',
'tea bagging',
'threesome',
'throating',
'tied up',
'tight white',
'tit',
'tits',
'titties',
'titty',
'tongue in a',
'topless',
'tosser',
'towelhead',
'tranny',
'tribadism',
'tub girl',
'tubgirl',
'tushy',
'twat',
'twink',
'twinkie',
'two girls one cup',
'undressing',
'upskirt',
'urethra play',
'urophilia',
'vagina',
'venus mound',
'vibrator',
'violet blue',
'violet wand',
'vorarephilia',
'voyeur',
'vulva',
'wank',
'wet dream',
'wetback',
'white power',
'whore',
'women rapping',
'wrapping men',
'wrinkled starfish',
'xx',
'xxx',
'yaoi',
'yellow showers',
'yiffy',
    'zoophilia']
| wasimreza08/Word-search | word_scripts/profanity_filter.py | Python | mit | 4,348 |
from flask import url_for
from flask.ext.script import Manager
from main import app
manager = Manager(app)
@manager.command
def list_routes():
import urllib
output = []
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
url = url_for(rule.endpoint, **options)
line = urllib.unquote("{:50s} {:20s} {}".format(rule.endpoint, methods, url))
output.append(line)
for line in sorted(output):
print(line)
if __name__ == "__main__":
manager.run()
| AltCtrlSupr/acs-nginx-api | manager.py | Python | gpl-3.0 | 629 |
# coding=utf-8
"""Dialog test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'serhat@yilmazturk.info'
__date__ = '2017-03-22'
__copyright__ = 'Copyright 2017, Serhat YILMAZTURK'
import unittest
from PyQt4.QtGui import QDialogButtonBox, QDialog
from ndvist_dialog import NDVISTDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class NDVISTDialogTest(unittest.TestCase):
"""Test dialog works."""
def setUp(self):
"""Runs before each test."""
self.dialog = NDVISTDialog(None)
def tearDown(self):
"""Runs after each test."""
self.dialog = None
def test_dialog_ok(self):
"""Test we can click OK."""
button = self.dialog.button_box.button(QDialogButtonBox.Ok)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Accepted)
def test_dialog_cancel(self):
"""Test we can click cancel."""
button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Rejected)
if __name__ == "__main__":
suite = unittest.makeSuite(NDVISTDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| syilmazturk/NDVIST | test/test_ndvist_dialog.py | Python | mit | 1,506 |
#!/usr/bin/env python3.2
# -*- coding: utf-8 -*-
"""
Compare two spectra and return the cosine distance between them.
The returned value is between 0 and 1, a returned value of 1
represents highest similarity.
Example:
>>> spec1 = pymzml.spec.Spectrum(measuredPrecision = 20e-5)
>>> spec2 = pymzml.spec.Spectrum(measuredPrecision = 20e-5)
>>> spec1.peaks = [ ( 1500,1 ), ( 1502,2.0 ), (1300,1 )]
>>> spec2.peaks = [ ( 1500,1 ), ( 1502,1.3 ), (1400,2 )]
>>> spec1.similarityTo( spec2 )
0.5682164685724541
>>> spec1.similarityTo( spec1 )
1.0000000000000002
"""
from __future__ import print_function
import sys
import pymzml
import get_example_file
def main(verbose = False):
examplemzML = get_example_file.open_example('BSA1.mzML')
if verbose:
print("""
comparing specs
""")
run = pymzml.run.Reader(examplemzML)
tmp = []
for spec in run:
if spec['ms level'] == 1:
if verbose:
print("Parsing spec lvl 1 has id {0}".format(spec['id']))
tmp.append(spec.deRef())
if len(tmp) >= 3:
break
if verbose:
print("Print total number of specs collected {0}".format(len(tmp) ) )
print("cosine between spec 1 & 2 is ",tmp[0].similarityTo(tmp[1]))
print("cosine score between spec 1 & 3 is ",tmp[0].similarityTo(tmp[2]))
print("cosine score between spec 2 & 3 is ",tmp[1].similarityTo(tmp[2]))
print("cosine score between first spec against itself ",tmp[0].similarityTo(tmp[0]))
if tmp[0].similarityTo(tmp[0]) == 1:
return True
else:
return False
if __name__ == '__main__':
main(verbose = True)
| hroest/pymzML | example_scripts/compareSpectra.py | Python | lgpl-3.0 | 1,732 |
# -*- coding: utf-8 -*-
'''
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urlparse, urllib, base64
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import dom_parser2
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['vodly.us', 'vodly.unblocked.pro']
self.base_link = 'http://vodly.us'
self.search_link = '/search?s=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
items = []
clean_title = cleantitle.geturl(title) +'-'+ year
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = cache.get(client.request, 1, search_url)
r = client.parseDOM(r, 'div', {'class': 'col-sm-12'})
r = client.parseDOM(r, 'div', {'class': 'col-sm-2.+?'})
r1 = client.parseDOM(r, 'h3')
r1 = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'a')[0])for i in r1]
y = [re.findall('</i>\s*(\d{4})</span>', i) for i in r]
items += [(r1[i], y[i]) for i in range(len(y))]
r = [(i[0][0], i[1][0], i[0][1]) for i in items if
(cleantitle.get(i[0][1]) == cleantitle.get(title) and i[1][0] == year)]
url = r[0][0]
return url
except Exception:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
url = urlparse.urljoin(self.base_link,url)
r = cache.get(client.request, 1, url)
try:
v = client.parseDOM(r, 'iframe', ret='data-src')[0]
url = v.split('=')[1]
try:
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
except:
pass
r = client.parseDOM(r, 'tbody')
r = client.parseDOM(r, 'tr')
r = [(re.findall('<td>(.+?)</td>', i)[0], client.parseDOM(i, 'a', ret='href')[0]) for i in r]
if r:
for i in r:
try:
host = i[0]
url = urlparse.urljoin(self.base_link, i[1])
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
if 'other'in host: continue
sources.append({
'source': host,
'quality': 'SD',
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
return sources
except Exception:
return
def resolve(self, url):
if self.base_link in url:
url = client.request(url)
url = client.parseDOM(url, 'div', attrs={'class': 'wrap'})
url = client.parseDOM(url, 'a', ret='href')[0]
        return url
| mrquim/repository.mrquim | script.module.exodus/lib/resources/lib/sources/en/vodly.py | Python | gpl-2.0 | 4,365 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from anvil import colorizer
from anvil import log
from anvil.actions import base as action
LOG = log.getLogger(__name__)
class UninstallAction(action.Action):
@property
def lookup_name(self):
return 'uninstall'
def _order_components(self, components):
components = super(UninstallAction, self)._order_components(components)
components.reverse()
return components
def _run(self, persona, component_order, instances):
removals = ['configure']
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info('Unconfiguring %s.', colorizer.quote(i.name)),
run=lambda i: i.unconfigure(),
end=None,
),
component_order,
instances,
'unconfigure',
*removals
)
removals += ['post-install']
self._run_phase(
action.PhaseFunctors(
start=None,
run=lambda i: i.pre_uninstall(),
end=None,
),
component_order,
instances,
'pre-uninstall',
*removals
)
removals += ['package-install', 'package-install-all-deps']
general_package = "general"
dependency_handler = self.distro.dependency_handler_class(
self.distro, self.root_dir, instances.values())
self._run_phase(
action.PhaseFunctors(
start=lambda i: LOG.info("Uninstalling packages"),
run=lambda i: dependency_handler.uninstall(),
end=None,
),
[general_package],
{general_package: instances[general_package]},
"package-uninstall",
*removals
)
| pombredanne/anvil | anvil/actions/uninstall.py | Python | apache-2.0 | 2,482 |
from pycukes import *
@Given('I have a calculator')
def i_have_a_calc(context):
pass
@When('I enter with 1 + -2 and press =')
def one_plus_minus_two(context):
pass
@Then('I see -1 in my LCD')
def fail(context):
assert None
| hltbra/pycukes | specs/steps/sum_of_one_and_two_negative_with_two_oks_and_one_fail.py | Python | mit | 238 |
import sublime
import sublime_plugin
try:
from GitGutter.view_collection import ViewCollection
except ImportError:
from view_collection import ViewCollection
def plugin_loaded():
"""
Ugly hack for icons in ST3
kudos:
github.com/facelessuser/BracketHighlighter/blob/BH2ST3/bh_core.py#L1380
"""
from os import makedirs
from os.path import exists, join
icon_path = join(sublime.packages_path(), "Theme - Default")
if not exists(icon_path):
makedirs(icon_path)
class GitGutterCommand(sublime_plugin.WindowCommand):
region_names = ['deleted_top', 'deleted_bottom',
'deleted_dual', 'inserted', 'changed',
'untracked', 'ignored']
def run(self, force_refresh=False):
self.view = self.window.active_view()
if not self.view:
# View is not ready yet, try again later.
sublime.set_timeout(self.run, 1)
return
self.clear_all()
if ViewCollection.untracked(self.view):
self.bind_files('untracked')
elif ViewCollection.ignored(self.view):
self.bind_files('ignored')
else:
            # The diff only needs to be computed for tracked files
if force_refresh:
ViewCollection.clear_git_time(self.view)
inserted, modified, deleted = ViewCollection.diff(self.view)
self.lines_removed(deleted)
self.bind_icons('inserted', inserted)
self.bind_icons('changed', modified)
def clear_all(self):
for region_name in self.region_names:
self.view.erase_regions('git_gutter_%s' % region_name)
def lines_to_regions(self, lines):
regions = []
for line in lines:
position = self.view.text_point(line - 1, 0)
region = sublime.Region(position, position)
regions.append(region)
return regions
def lines_removed(self, lines):
top_lines = lines
bottom_lines = [line - 1 for line in lines if line > 1]
dual_lines = []
for line in top_lines:
if line in bottom_lines:
dual_lines.append(line)
for line in dual_lines:
bottom_lines.remove(line)
top_lines.remove(line)
self.bind_icons('deleted_top', top_lines)
self.bind_icons('deleted_bottom', bottom_lines)
self.bind_icons('deleted_dual', dual_lines)
def icon_path(self, icon_name):
if icon_name in ['deleted_top','deleted_bottom','deleted_dual']:
if self.view.line_height() > 15:
icon_name = icon_name + "_arrow"
if int(sublime.version()) < 3014:
path = '..'
extn = ''
else:
path = 'Packages'
extn = '.png'
return path + '/GitGutter/icons/' + icon_name + extn
def bind_icons(self, event, lines):
regions = self.lines_to_regions(lines)
event_scope = event
if event.startswith('deleted'):
event_scope = 'deleted'
scope = 'markup.%s.git_gutter' % event_scope
icon = self.icon_path(event)
self.view.add_regions('git_gutter_%s' % event, regions, scope, icon)
def bind_files(self, event):
lines = []
lineCount = ViewCollection.total_lines(self.view)
i = 0
while i < lineCount:
lines += [i + 1]
i = i + 1
self.bind_icons(event, lines)
| cristianomarjar/SublimeText | git_gutter.py | Python | mit | 3,628 |
# -*- coding: utf-8 -*-
#
#
# This file is a part of 'linuxacademy-dl' project.
#
# Copyright (c) 2016-2017, Vassim Shahir
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import unicode_literals, print_function, with_statement
from . import __title__
from ._session import session
from concurrent.futures import ThreadPoolExecutor
from requests_futures.sessions import FuturesSession
from .hls_decrypt import HLSDecryptAES128
from six import BytesIO
from contextlib import closing
import sys
import os
import subprocess
import multiprocessing
import tempfile
import logging
logger = logging.getLogger(__title__)
class DownloadEngine(object):
POOL_SIZE = multiprocessing.cpu_count() * 2
def __init__(self, use_ffmpeg=True, skip_existing=True):
self.use_ffmpeg = use_ffmpeg
self.skip_existing = skip_existing
self.session = FuturesSession(
executor=ThreadPoolExecutor(max_workers=self.POOL_SIZE),
session=session
)
def hls_download(self, hls_data, save_as):
try:
contents = [
self.session.get(
url, background_callback=lambda s, r: r.close()
)
for url in hls_data['data']
]
ts_accumulator = tempfile.NamedTemporaryFile() \
if self.use_ffmpeg \
else open(save_as, "wb")
for idx, content in enumerate(contents, 0):
chunk_resp = content.result()
chunk = chunk_resp.content
logger.debug(
'Fetched chunk_resp #{} ({}/{})'.format(
idx, chunk_resp.headers['Content-Length'], len(chunk)
)
)
if hls_data.get('encryption'):
iv = hls_data.get('iv', idx)
key = session.get(hls_data.get('key_uri')).content
with closing(BytesIO(chunk)) as fp:
with HLSDecryptAES128(
fp, key, iv
).decrypt() as dec_itm:
ts_accumulator.write(dec_itm.read())
else:
ts_accumulator.write(chunk)
if self.use_ffmpeg:
self.ffmpeg_process(ts_accumulator.name, save_as)
except OSError as exc:
logger.critical('Failed to download: %s', exc)
finally:
            if ts_accumulator is not None:
                ts_accumulator.close()
def safe_process_download_path(self, save_path, file_name, make_dirs=True):
sys_encoding = sys.getdefaultencoding()
d, f = map(
lambda x: x.encode(sys_encoding, 'ignore').decode('utf-8'),
(save_path, file_name)
) if sys_encoding != 'utf-8' else (save_path, file_name)
if make_dirs:
try:
os.makedirs(d)
            except OSError:
                # the target directory may already exist
                pass
return os.path.join(d, f)
def __call__(self, download_info, save_to):
final_path = self.safe_process_download_path(
save_to, download_info['save_resource_as']
)
if self.skip_existing and os.path.exists(final_path):
logger.info("Skipping already existing file {}".format(final_path))
else:
logger.info("Downloading {}".format(final_path))
self.hls_download(download_info, final_path)
logger.info("Downloaded {}".format(final_path))
def ffmpeg_process(self, input_file_name, output_file_name):
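        # Remux the accumulated MPEG-TS data into an MP4 container with stream
        # copy (no re-encoding); aac_adtstoasc repackages the AAC audio for MP4.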
command = [
'ffmpeg',
'-loglevel', 'fatal',
'-i', input_file_name,
'-y',
'-bsf:a', 'aac_adtstoasc',
'-vcodec', 'copy',
'-c', 'copy',
'-crf', '50',
'-f', 'mp4', output_file_name
]
logger.debug('Executing FFMPEG Command "{}"'.format(' '.join(command)))
subprocess.call(command)
| vassim/linuxacademy-dl | linuxacademy_dl/downloader.py | Python | bsd-3-clause | 5,435 |
import types
from abc import ABCMeta, abstractmethod
from importlib import import_module
import framework.db as db
from framework.core.util import extract_artifact_id, is_uri, get_feature_from_uri
class BaseType(object):
# These are set at registration.
name = None
uri = None
def __init__(self, *args, **kwargs):
raise Exception("Do not instantiate this.")
@classmethod
def dereference(cls, reference):
"""Pass by value dereference producing an instance of the bound type.
The bound type is the real object expected by a method annotated by
this class.
Basic type checking should be performed here via normalize.
"""
raise NotImplementedError("`dereference` must be implemented by a subclass.")
@classmethod
def instantiate(cls, argument, study, name):
"""Instantiate the results of a method on the database.
Basic type checking should be performed here via check_type.
"""
raise NotImplementedError("`instantiate` must be implemented by a subclass.")
@classmethod
def normalize(cls, reference):
"""Normalize a reference and validate the reference is of correct type.
"""
raise NotImplementedError("`normalize` must be implemented by a subclass.")
@classmethod
def check_type(cls, argument):
"""Check that an instance of the bound type is of the correct type."""
raise NotImplementedError("`check_type` must be implemented by a subclass.")
@classmethod
def annotation(cls):
raise NotImplementedError("`annotation` must be implemented by a subclass.")
class Primitive(BaseType):
"""Primitives are values that are encoded entirely in the reference itself.
"""
@classmethod
def annotation(cls):
return {
'uri': cls.uri,
'content': 'primitive'
}
@classmethod
def check_type(cls, reference):
cls.normalize(reference)
@classmethod
def instantiate(cls, argument, *_):
# By definition primitives can encode their value in their own
# reference, so instantiate is actually just a dereference.
return cls.dereference(argument)
@classmethod
def dereference(cls, reference):
reference = cls.normalize(reference)
return reference
class Parameterized(BaseType):
"""Parameterized types are produced from type factories.
They encode some restriction on the type's domain or generalize it to a
restricted collection.
"""
# these properties will be defined by the parameterized type factory
@property
@classmethod
def subtype(cls):
raise NotImplementedError("`subtype` must be set by a subclass.")
@property
@classmethod
def args(cls):
raise NotImplementedError("`args` must be set by a subclass.")
@classmethod
def annotation(cls):
return {
'uri': cls.uri,
'content': 'parameterized',
'subtype': cls.subtype.annotation(),
'args': cls.args
}
class Artifact(BaseType):
"""Basic units of input/output in a study not captured by a primitive.
"""
@classmethod
def dereference(cls, reference):
uri = cls.normalize(reference)
artifact_id = extract_artifact_id(uri)
artifact = db.ArtifactProxy.get(id=artifact_id)
return cls.load(artifact.artifact.data)
@classmethod
def instantiate(cls, argument, study, name):
cls.check_type(argument)
type_ = db.Type.get(uri=cls.uri)
artifact = db.Artifact(type=type_, data=cls.save(argument), study=study)
artifact.save()
artifact_proxy = db.ArtifactProxy(name=name, artifact=artifact,
study=study)
artifact_proxy.save()
return artifact_proxy.uri
@classmethod
def normalize(cls, reference):
if not isinstance(reference, str):
reference = reference.decode('utf-8')
artifact_id = extract_artifact_id(reference)
type_uri = db.ArtifactProxy.get(id=artifact_id).artifact.type.uri
if type_uri != cls.uri:
raise TypeError()
return reference
@classmethod
def check_type(cls, argument):
if not isinstance(argument, cls.data_type):
raise TypeError("%r is not an instance of %r." % (argument,
cls.data_type))
@classmethod
def annotation(cls):
return {
'uri': cls.uri,
'content': 'artifact'
}
@property
@classmethod
def data_type(cls):
"""Developer-defined property for describing the bound type."""
raise NotImplementedError("`data_type` must be set by a subclass.")
@classmethod
def load(cls, blob):
"""Load an instance from database.
"""
raise NotImplementedError("`load` must be implemented by a subclass.")
@classmethod
def save(cls, blob):
"""Save an instance to the database.
"""
raise NotImplementedError("`save` must be implemented by a subclass.")
class _TypeRegistry(object):
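    # Registry mapping URIs to type classes. The artifact/primitive/parameterized
    # methods double as registration hooks that fill in each class's name and uri.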
def __init__(self):
self._artifacts = {}
self._primitives = {}
self._parameterized = {}
def artifact(self, uri, cls):
if not issubclass(cls, Artifact):
raise TypeError("Class %r must be a subclass of %r." %
(cls, Artifact))
if self.has_type(uri):
raise Exception()
if cls.name is None:
cls.name = cls.__name__
cls.uri = uri
self._artifacts[uri] = cls
return cls
def primitive(self, cls):
if not issubclass(cls, Primitive):
raise TypeError("Class %r must be a subclass of %r." %
(cls, Primitive))
cls_name = cls.__name__
uri = '/system/types/primitives/%s' % cls_name
if self.has_type(uri):
raise Exception()
if cls.name is None:
cls.name = cls_name
cls.uri = uri
self._primitives[uri] = cls
return cls
def parameterized(self, function):
if not isinstance(function, types.FunctionType):
raise TypeError("%r must be a function." % function)
uri = '/system/types/parameterized/%s' % function.__name__
name = function.__name__
def wrapped_factory(*args, **kwargs):
param_type = function(*args, **kwargs)
param_type.uri = uri
param_type.name = name
return param_type
wrapped_factory.uri = uri
wrapped_factory.name = name
if self.has_type(uri):
raise Exception()
self._parameterized[uri] = wrapped_factory
return wrapped_factory
def has_type(self, uri):
return ((uri in self._artifacts) or
(uri in self._primitives) or
(uri in self._parameterized))
def get_type(self, uri):
if uri in self._artifacts:
return self._artifacts[uri]
if uri in self._primitives:
return self._primitives[uri]
if uri in self._parameterized:
return self._parameterized[uri]
else:
raise ValueError("Unrecognized URI %r." % uri)
def get_types(self):
master = {}
master.update(self._artifacts.copy())
master.update(self._primitives.copy())
master.update(self._parameterized.copy())
return master
def get_artifact_types(self):
return self._artifacts.copy()
def get_primitive_types(self):
return self._primitives.copy()
def get_parameterized_types(self):
return self._parameterized.copy()
type_registry = _TypeRegistry()
| biocore/metoo | framework/types/__init__.py | Python | bsd-3-clause | 7,874 |
#!/usr/bin/env python3
import command_set_test
import command_test
| aaronlbrink/cs108FinalProject | src/test.py | Python | gpl-3.0 | 68 |
#!/usr/bin/env python -t
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Jonathan Delvaux <pyshell@djoproject.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyshell.arg.checker.argchecker import ArgChecker
from pyshell.utils.string65 import isString
TYPENAME = "string"
class StringArgChecker(ArgChecker):
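    # Checker that validates string arguments, optionally constrained by a
    # minimum and maximum length.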
def __init__(self, minimum_string_size=0, maximum_string_size=None):
ArgChecker.__init__(self, 1, 1, True)
if type(minimum_string_size) != int:
excmsg = ("Minimum string size must be an integer, got type '%s' "
"with the following value <%s>")
excmsg %= (str(type(minimum_string_size)),
str(minimum_string_size),)
self._raiseArgInitializationException(excmsg)
if minimum_string_size < 0:
excmsg = ("Minimum string size must be a positive value bigger or "
"equal to 0, got <%s>")
excmsg %= str(minimum_string_size)
self._raiseArgInitializationException(excmsg)
if maximum_string_size is not None:
if type(maximum_string_size) != int:
excmsg = ("Maximum string size must be an integer, got type "
"'%s' with the following value <%s>")
excmsg %= (str(type(maximum_string_size)),
str(maximum_string_size),)
self._raiseArgInitializationException(excmsg)
if maximum_string_size < 1:
excmsg = ("Maximum string size must be a positive value bigger"
" than 0, got <%s>")
excmsg %= str(maximum_string_size)
self._raiseArgInitializationException(excmsg)
if (minimum_string_size is not None and
maximum_string_size is not None and
maximum_string_size < minimum_string_size):
excmsg = ("Maximum string size <%s> can not be smaller than "
"Minimum string size <%s>")
excmsg %= (str(maximum_string_size), str(minimum_string_size),)
self._raiseArgInitializationException(excmsg)
self.minimum_string_size = minimum_string_size
self.maximum_string_size = maximum_string_size
def getValue(self, value, arg_number=None, arg_name_to_bind=None):
value = ArgChecker.getValue(self, value, arg_number, arg_name_to_bind)
if value is None:
excmsg = "the string arg can't be None"
self._raiseArgException(excmsg, arg_number, arg_name_to_bind)
if not isString(value):
if not hasattr(value, "__str__"):
excmsg = "this value '%s' is not a valid string, got type '%s'"
excmsg %= (str(value), str(type(value)),)
self._raiseArgException(excmsg, arg_number, arg_name_to_bind)
value = str(value)
if len(value) < self.minimum_string_size:
excmsg = ("this value '%s' is a too small string, got size '%s' "
"with value '%s', minimal allowed size is '%s'")
excmsg %= (str(value),
str(len(value)),
str(value),
str(self.minimum_string_size),)
self._raiseArgException(excmsg, arg_number, arg_name_to_bind)
if (self.maximum_string_size is not None and
len(value) > self.maximum_string_size):
excmsg = ("this value '%s' is a too big string, got size '%s' with"
" value '%s', maximal allowed size is '%s'")
excmsg %= (str(value),
str(len(value)),
str(value),
str(self.maximum_string_size),)
self._raiseArgException(excmsg, arg_number, arg_name_to_bind)
return value
def getUsage(self):
return "<string>"
@classmethod
def getTypeName(cls):
return TYPENAME
| djo938/supershell | pyshell/arg/checker/string43.py | Python | gpl-3.0 | 4,532 |
import numpy as np
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
Adopted from https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def iou(bbox_1, bbox_2):
"""
Get IoU value of two bboxes
:param bbox_1:
:param bbox_2:
:return: IoU
"""
w_1 = bbox_1[2] - bbox_1[0] + 1
h_1 = bbox_1[3] - bbox_1[1] + 1
w_2 = bbox_2[2] - bbox_2[0] + 1
h_2 = bbox_2[3] - bbox_2[1] + 1
area_1 = w_1 * h_1
area_2 = w_2 * h_2
overlap_bbox = (max(bbox_1[0], bbox_2[0]), max(bbox_1[1], bbox_2[1]),
min(bbox_1[2], bbox_2[2]), min(bbox_1[3], bbox_2[3]))
overlap_w = max(0, (overlap_bbox[2] - overlap_bbox[0] + 1))
overlap_h = max(0, (overlap_bbox[3] - overlap_bbox[1] + 1))
overlap_area = overlap_w * overlap_h
union_area = area_1 + area_2 - overlap_area
IoU = overlap_area * 1.0 / union_area
return IoU
def viou(traj_1, duration_1, traj_2, duration_2):
""" compute the voluminal Intersection over Union
for two trajectories, each of which is represented
by a duration [fstart, fend) and a list of bounding
boxes (i.e. traj) within the duration.
"""
if duration_1[0] >= duration_2[1] or duration_1[1] <= duration_2[0]:
return 0.
elif duration_1[0] <= duration_2[0]:
head_1 = duration_2[0] - duration_1[0]
head_2 = 0
if duration_1[1] < duration_2[1]:
tail_1 = duration_1[1] - duration_1[0]
tail_2 = duration_1[1] - duration_2[0]
else:
tail_1 = duration_2[1] - duration_1[0]
tail_2 = duration_2[1] - duration_2[0]
else:
head_1 = 0
head_2 = duration_1[0] - duration_2[0]
if duration_1[1] < duration_2[1]:
tail_1 = duration_1[1] - duration_1[0]
tail_2 = duration_1[1] - duration_2[0]
else:
tail_1 = duration_2[1] - duration_1[0]
tail_2 = duration_2[1] - duration_2[0]
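    # head_*/tail_* now delimit the temporally shared frame range within each
    # trajectory; accumulate the per-frame box intersection areas over it.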
v_overlap = 0
for i in range(tail_1 - head_1):
roi_1 = traj_1[head_1 + i]
roi_2 = traj_2[head_2 + i]
left = max(roi_1[0], roi_2[0])
top = max(roi_1[1], roi_2[1])
right = min(roi_1[2], roi_2[2])
bottom = min(roi_1[3], roi_2[3])
v_overlap += max(0, right - left + 1) * max(0, bottom - top + 1)
v1 = 0
for i in range(len(traj_1)):
v1 += (traj_1[i][2] - traj_1[i][0] + 1) * (traj_1[i][3] - traj_1[i][1] + 1)
v2 = 0
for i in range(len(traj_2)):
v2 += (traj_2[i][2] - traj_2[i][0] + 1) * (traj_2[i][3] - traj_2[i][1] + 1)
    return float(v_overlap) / (v1 + v2 - v_overlap)
| xdshang/VidVRD-helper | evaluation/common.py | Python | mit | 3,685 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-21 14:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0082_auto_20161215_1623'),
]
operations = [
migrations.RenameModel(
old_name='Claimed',
new_name='Claimant',
),
migrations.RenameField(
model_name='claimant',
old_name='claimedship_grant',
new_name='claimantship_grant',
),
migrations.RenameField(
model_name='fund',
old_name='claimed',
new_name='claimant',
),
migrations.AlterField(
model_name='expense',
name='funds_from',
field=models.CharField(choices=[('C', 'Continuing (claimantship)'), ('I', 'Core (Software Sustainability Institute)'), ('F', 'Grant (inauguration claimantship)')], default='C', max_length=1),
),
]
| softwaresaved/fat | lowfat/migrations/0083_auto_20161221_1411.py | Python | bsd-3-clause | 1,018 |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see starthinker/scripts for possible source):
# - Command: "python starthinker_ui/manage.py airflow"
#
###########################################################################
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure an RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure an RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
Storage Bucket
Create and permission a bucket in Storage.
- Specify the name of the bucket and who will have owner permissions.
- Existing buckets are preserved.
- Adding a permission to the list will update the permissions but removing them will not.
- You have to manualy remove grants.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
INPUTS = {
'auth_write':'service', # Credentials used for writing data.
'bucket_bucket':'', # Name of Google Cloud Bucket to create.
'bucket_emails':'', # Comma separated emails.
'bucket_groups':'', # Comma separated groups.
}
RECIPE = {
'tasks':[
{
'bucket':{
'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}},
'bucket':{'field':{'name':'bucket_bucket','kind':'string','order':2,'default':'','description':'Name of Google Cloud Bucket to create.'}},
'emails':{'field':{'name':'bucket_emails','kind':'string_list','order':3,'default':'','description':'Comma separated emails.'}},
'groups':{'field':{'name':'bucket_groups','kind':'string_list','order':4,'default':'','description':'Comma separated groups.'}}
}
}
]
}
dag_maker = DAG_Factory('bucket', RECIPE, INPUTS)
dag = dag_maker.generate()
if __name__ == "__main__":
dag_maker.print_commandline()
| google/starthinker | dags/bucket_dag.py | Python | apache-2.0 | 4,423 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Kai Kratzer, Universität Stuttgart, ICP,
# Allmandring 3, 70569 Stuttgart, Germany; all rights
# reserved unless otherwise stated.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
# os-related
import sys
sys.path.append('../server/modules')
# custom
import configpoints
if len(sys.argv) < 2:
print("Usage:", sys.argv[0], "<../server/DB/DB-file>")
exit(1)
cfph = configpoints.configpoints('none', sys.argv[1])
print(cfph.show_table())
| freshs/freshs | scripts/ffs_show_DB.py | Python | gpl-3.0 | 1,168 |
import requests
from PIL import Image, ImageEnhance, ImageChops, ImageFilter
from io import BytesIO, StringIO
import time
import sys, os
import codecs
url = 'http://d1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net'
imgurl = url + '/captcha.php'
headers = { 'Host' : 'd1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net',
'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:37.0) Gecko/20100101 Firefox/37.0',
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language' : 'en-US,en;q=0.5',
'Accept-Encoding' : 'gzip, deflate',
'DNT' : '1',
'Referer' : 'http://http://d1222391-23d7-46de-abef-73cbb63c1862.levels.pathwar.net/',
'Cookie' : 'PHPSESSID=',#erased
'Authorization' : 'Basic ',#erased
# 'Connection' : 'keep-alive',
'Content-Type' : 'application/x-www-form-urlencoded' }
def recognize(img, bounds):
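    # Compare every letter crop against all stored patterns from ads.dat and
    # keep, for each crop, the letter whose pattern matches the most pixels.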
# read dataset of images for each letter
imgs = {}
datfile = open("ads.dat", "rt")
line = datfile.readline()
while line!="":
key = line[0]
if key not in imgs:
imgs[key] = []
            # each ads.dat line is "<letter>|<hex-encoded BMP>\n"; decode the
            # hex payload back into bytes so PIL can open it as an image
            imgs[key].append(Image.open(BytesIO(bytes.fromhex(line[2:-1]))))
line = datfile.readline()
datfile.close()
# calculate difference with dataset for each boundbox
word = ""
for bound in bounds:
guess = []
total = (img.crop(bound).size)[0]*(img.crop(bound).size)[1]*1.0
for key in imgs:
for pattern in imgs[key]:
diff = ImageChops.difference(img.crop(bound), pattern.resize(img.crop(bound).size, Image.NEAREST))
pixels = list(diff.getdata())
samePixCnt = sum(i==0 for i in pixels)
guess.append([samePixCnt, key])
guess.sort(reverse=True)
word = word+guess[0][1]
print(total, guess[0:3], guess[0][0]/total, guess[1][0]/total, guess[2][0]/total)
print(word)
return word.replace("_", "")
def separate(img):
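    # Segment the captcha into per-letter bounding boxes: columns with few dark
    # pixels are treated as gaps, and each slice is then trimmed vertically to
    # the rows that actually contain the letter.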
# count number of pixels for each column
colPixCnts = []
for col in range(img.size[0]):
pixels = list(img.crop([col, 0, col+1, img.size[1]]).getdata())
colPixCnts.append(sum(i==0 for i in pixels))
print (colPixCnts)
print("\n")
# average out pixel counts for trough column
for i in range(3, len(colPixCnts)-3, 2):
if colPixCnts[i-3]>4 and colPixCnts[i+3]>4:
colPixCnts[i-2:i+3] = [j+10 for j in colPixCnts[i-2:i+3]]
print(colPixCnts)
print("\n")
# calculate all bounding boxes of all letters
bounds = []
left = 0
right = 0
for col in range(img.size[0]): # slice all letters per column
if left==0 and colPixCnts[col]>20: # if (begin not set) and (col has letter)
left = col # then letter begin
if left!=0 and colPixCnts[col]<=20: # if (begin is set) and (col no letter)
right = col # then letter end
if right-left>8: # if (the letter is wide enough)
##############################################
print((right-left))
top = -1
bottom = -1
prev = -1
curr = -1
for row in range(img.size[1]): # slice single letter per row
pixels = list(img.crop([left, row, right, row+1]).getdata())
rowPixCnt = sum(i==255 for i in pixels)
if rowPixCnt==(right-left): # if (row no letter)
curr = row
if (curr-prev)>(bottom-top): # if (the letter is tall enough)
top = prev
bottom = curr
prev = curr
if (img.size[1]-prev)>(bottom-top): # if (the letter align to bottom)
top = prev
bottom = img.size[1]
##############################################
bounds.append([left, top+1, right, bottom]) # top row should has letter
left = 0
right = 0
print(bounds)
return bounds
def prepare(im):
im2 = Image.new("P",im.size,255)
for x in range(im.size[1]):
for y in range(im.size[0]):
pix = im.getpixel((y,x))
if pix == 1: # these are the numbers to get
im2.putpixel((y,x),0)
# im2 = im2.convert("RGB")
im2 = im2.resize((im2.size[0]*8, im2.size[1]*8), Image.BILINEAR)
# im2 = im2.resize((int(im2.size[0] / 2), int(im2.size[1] / 2)), Image.ANTIALIAS)
# im2 = ImageEnhance.Contrast(im2).enhance(1.4)
# im2 = ImageEnhance.Sharpness(im2).enhance(5)
# im2 = ImageChops.invert(im2)
# im2 = im2.filter(ImageFilter.MedianFilter(3))
# im2 = im2.convert('P')
return im2
def _train(img, bounds):
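    # Interactive training: show each cropped letter, ask the user to label it,
    # and append "<letter>|<hex-encoded BMP>" records to ads.dat.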
datfile = open("ads.dat", "rt")
lines = datfile.readlines()
datfile.close()
datfile = open("ads.dat", "at")
for bound in bounds:
img.crop(bound).show()
letter = input("Type in the letters you see in the image above (ENTER to skip): ")
bmpfile = BytesIO()
img.crop(bound).save(bmpfile, format='BMP')
# g = codecs.encode(bmpfile.getvalue(), 'hex_codec')
s = codecs.encode(bmpfile.getvalue(), 'hex')
s = codecs.decode(s)
line = letter+"|"+s+"\n"
if (letter!="") and (line not in lines): # if (not skipped) and (not duplicated)
datfile.write(line)
print(line)
bmpfile.close()
datfile.close()
def vertical_cut(im):
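    # Simpler segmentation used by the main loop: binarise the image, then cut
    # letter bounding boxes wherever consecutive columns contain dark pixels.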
im = im.convert("P")
im2 = Image.new("P",im.size,255)
im = im.convert("P")
temp = {}
for x in range(im.size[1]):
for y in range(im.size[0]):
pix = im.getpixel((y,x))
temp[pix] = pix
if pix == 1: # these are the numbers to get
im2.putpixel((y,x),0)
# new code starts here
inletter = False
foundletter=False
start = 0
end = 0
letters = []
for y in range(im2.size[0]): # slice across
for x in range(im2.size[1]): # slice down
pix = im2.getpixel((y,x))
if pix != 255:
inletter = True
if foundletter == False and inletter == True:
foundletter = True
start = y
if foundletter == True and inletter == False:
foundletter = False
end = y
letters.append((start,end))
inletter=False
bounds = []
for letter in letters:
bounds.append([ letter[0] , 0, letter[1], im2.size[1] ])
print(bounds)
return bounds
if __name__=="__main__":
# if len(sys.argv) < 2:
# print(("usage: %s image" % (sys.argv[0])))
# sys.exit(2)
# file_name = sys.argv[1]
# img = Image.open(file_name).convert('P')
i = 0
while i < 3 :
response = requests.get(imgurl, headers = headers)
the_page = response.content
file = BytesIO(the_page)
img = Image.open(file)
# img = prepare(img)
img = img.resize((img.size[0]*4, img.size[1]*4), Image.BILINEAR)
img.show()
# bounds = separate(img)
bounds = vertical_cut(img)
_train(img, bounds)
i = i + 1
| KKfo/captcha_solver | experiment.py | Python | gpl-3.0 | 7,350 |
#
# This tree searcher uses the lab3 games framework
# to run alpha-beta searches on static game trees
# of the form seen in quiz/recitation/tutorial examples.
#
# (See TEST_1 for an example tree.)
#
# In the directory where lab3.py lives, run:
#
# ~> python tree_search.py
#
# But as prereq, your lab3.py should have def alpha_beta_search
# implemented, and your function signature conforms to the interface
# defined below:
#
# def alpha_beta_search(board, depth,
# eval_fn,
# get_next_moves_fn,
# is_terminal_fn):
#
# In context of tree searches:
#
# board is the current tree node.
#
# depth is the search depth. If you specify depth as a very large
# number then your search will end at the leaves of trees.
#
# def eval_fn(board):
# a function that returns a score for a given board from the
# perspective of the state's current player.
#
# def get_next_moves(board):
# a function that takes a current node (board) and generates
# all next (move, newboard) tuples.
#
# def is_terminal_fn(depth, board):
# is a function that checks whether to statically evaluate
# a board/node (hence terminating a search branch).
#
# You can modify the existing alpha_beta_search interface in lab3
# to work with this interface by definining your own is_terminal_fn
# using optional arguments, like so:
#
# def alpha_beta_search(board, depth,
# eval_fn,
# get_next_moves_fn=get_all_next_moves,
# is_terminal_fn=<your_terminal_function>):
class Node:
"""
Representation of a generic game tree node.
Each node holds
1. a label
2. a static value (internal nodes
generally have a None static value)
3. node type {MIN, MAX}
4. list of child nodes.
"""
def __init__(self, label, value, node_type, children=[]):
self.label = label
self.value = value
self.node_type = node_type
self.children = children
def set_children(self, child_nodes):
"""Set the children of this tree node"""
if not self.children:
self.children = []
for child in child_nodes:
self.children.append(child)
def get_children(self):
return self.children
def __str__(self):
"""Print the value of this node."""
if self.value is None:
return self.label
else:
return "%s[%s]" %(self.label, self.value)
def add(self, child):
"""Add children to this node."""
if not self.children:
self.children = []
self.children.append(child)
def num_children(self):
"""Find how many children this node has."""
if self.children:
return len(self.children)
else:
return 0
def tree_as_string(node, depth=0):
"""
Generates a string representation of the tree
in a space indented format
"""
static_value = tree_eval(node)
buf = "%s%s:%s\n" %(" "*depth, node.label, static_value)
for elt in node.children:
buf += tree_as_string(elt, depth+1)
return buf
def make_tree(tup):
"""
Generates a Node tree from a tuple formatted tree
"""
return make_tree_helper(tup, "MAX")
def make_tree_helper(tup, node_type):
"""Generate a Tree from tuple format"""
n = Node(tup[0], tup[1], node_type)
children = []
if len(tup) > 2:
if node_type == "MAX":
node_type = "MIN"
else:
node_type = "MAX"
for c in xrange(2,len(tup)):
children.append(make_tree_helper(tup[c], node_type))
n.set_children(children)
return n
def is_at_depth(depth, node):
"""
is_terminal_fn for fixed depth trees
True if depth == 0 has been reached.
"""
return depth <= 0
def is_leaf(depth, node):
"""
is_terminal_fn for variable-depth trees.
Check if a node is a leaf node.
"""
return node.num_children() == 0
def tree_get_next_move(node):
"""
get_next_move_fn for trees
Returns the list of next moves for traversing the tree
"""
return [(n.label, n) for n in node.children]
def tree_eval(node):
"""
Returns the static value of a node
"""
if node.value is not None:
if node.node_type == "MIN":
return -node.value
elif node.node_type == "MAX":
return node.value
else:
raise Exception("Unrecognized node type: %s" %(node.node_type))
else:
return None
def TEST_1(expected):
from lab3 import alpha_beta_search
tup_tree = ("A", None,
("B", None,
("C", None,
("D", 2),
("E", 2)),
("F", None,
("G", 0),
("H", 4))
),
("I", None,
("J", None,
("K", 6),
("L", 8)),
("M", None,
("N", 4),
("O", 6))
)
)
tree = make_tree(tup_tree)
print "%s:\n%s" %("TREE_1", tree_as_string(tree))
v = alpha_beta_search(tree, 10,
tree_eval,
tree_get_next_move,
is_leaf)
print "BEST MOVE: %s" %(v)
print "EXPECTED: %s" %(expected)
def TEST_2(expected):
from lab3 import alpha_beta_search
tup_tree = ("A", None,
("B", None,
("C", None,
("D", 6),
("E", 4)),
("F", None,
("G", 8),
("H", 6))
),
("I", None,
("J", None,
("K", 4),
("L", 0)),
("M", None,
("N", 2),
("O", 2))
)
)
tree = make_tree(tup_tree)
print "%s:\n%s" %("TREE_2", tree_as_string(tree))
v = alpha_beta_search(tree, 10,
tree_eval,
tree_get_next_move,
is_leaf)
print "BEST MOVE: %s" %(v)
print "EXPECTED: %s" %(expected)
def TEST_3(expected):
from lab3 import alpha_beta_search
tup_tree = ("A", None,
("B", None,
("E", None,
("K", 8),
("L", 2)),
("F", 6)
),
("C", None,
("G", None,
("M", None,
("S", 4),
("T", 5)),
("N", 3)),
("H", None,
("O", 9),
("P", None,
("U", 10),
("V", 8))
),
),
("D", None,
("I", 1),
("J", None,
("Q", None,
("W", 7),
("X", 12)),
("K", None,
("Y", 11),
("Z", 15)
),
)
)
)
tree = make_tree(tup_tree)
print "%s:\n%s" %("TREE_3",
tree_as_string(tree))
v = alpha_beta_search(tree, 10,
tree_eval,
tree_get_next_move,
is_leaf)
print "BEST-MOVE: %s" %(v)
print "EXPECTED: %s" %(expected)
if __name__ == "__main__":
# Run basic tests using trees.
TEST_1("I")
TEST_2("B")
TEST_3("B")
| popuguy/mit-cs-6-034 | lab3/tree_searcher.py | Python | unlicense | 6,307 |