from __future__ import absolute_import, division, print_function
import re
import sqlalchemy as sa
from itertools import chain
from collections import Iterator
from datashape import (DataShape, Record, Option, var, dshape)
from datashape.predicates import isdimension, isrecord, isscalar
from datashape import discover
from datashape.dispatch import dispatch
from datetime import datetime, date
import datashape
from toolz import partition_all, keyfilter, first, pluck, memoize, map
from ..utils import keywords
from ..convert import convert, ooc_types
from ..append import append
from ..resource import resource
from ..chunks import Chunks
base = (int, float, datetime, date, bool, str)
# http://docs.sqlalchemy.org/en/latest/core/types.html
types = {'int64': sa.types.BigInteger,
'int32': sa.types.Integer,
'int': sa.types.Integer,
'int16': sa.types.SmallInteger,
'float32': sa.types.Float(precision=24), # sqlalchemy uses mantissa
'float64': sa.types.Float(precision=53), # for precision
'float': sa.types.Float(precision=53),
'real': sa.types.Float(precision=53),
'string': sa.types.Text,
'date': sa.types.Date,
'time': sa.types.Time,
'datetime': sa.types.DateTime,
'bool': sa.types.Boolean,
# ??: sa.types.LargeBinary,
# Decimal: sa.types.Numeric,
# ??: sa.types.PickleType,
# unicode: sa.types.Unicode,
# unicode: sa.types.UnicodeText,
# str: sa.types.Text, # ??
}
revtypes = dict(map(reversed, types.items()))
revtypes.update({sa.types.VARCHAR: 'string',
sa.types.String: 'string',
sa.types.Unicode: 'string',
sa.types.DATETIME: 'datetime',
sa.types.TIMESTAMP: 'datetime',
sa.types.FLOAT: 'float64',
sa.types.DATE: 'date',
sa.types.BIGINT: 'int64',
sa.types.INTEGER: 'int',
sa.types.NUMERIC: 'float64', # TODO: extend datashape to decimal
sa.types.NullType: 'string',
sa.types.Float: 'float64'})
@discover.register(sa.sql.type_api.TypeEngine)
def discover_typeengine(typ):
if typ in revtypes:
return dshape(revtypes[typ])[0]
if type(typ) in revtypes:
return dshape(revtypes[type(typ)])[0]
else:
for k, v in revtypes.items():
if isinstance(k, type) and (isinstance(typ, k) or
hasattr(typ, 'impl') and isinstance(typ.impl, k)):
return v
if k == typ:
return v
raise NotImplementedError("No SQL-datashape match for type %s" % typ)
@discover.register(sa.Column)
def discover_sqlalchemy_column(col):
if col.nullable:
return Record([[col.name, Option(discover(col.type))]])
else:
return Record([[col.name, discover(col.type)]])
@discover.register(sa.Table)
def discover_sqlalchemy_table(t):
return var * Record(list(sum([discover(c).parameters[0] for c in t.columns], ())))
@memoize
def metadata_of_engine(engine):
metadata = sa.MetaData(engine)
return metadata
def create_engine(uri, *args, **kwargs):
if ':memory:' in uri:
return sa.create_engine(uri, *args, **kwargs)
else:
return memoized_create_engine(uri, *args, **kwargs)
memoized_create_engine = memoize(sa.create_engine)
@dispatch(sa.engine.base.Engine, str)
def discover(engine, tablename):
metadata = metadata_of_engine(engine)
if tablename not in metadata.tables:
metadata.reflect(engine, views=engine.dialect.supports_views)
table = metadata.tables[tablename]
return discover(table)
@dispatch(sa.engine.base.Engine)
def discover(engine):
metadata = metadata_of_engine(engine)
return discover(metadata)
@dispatch(sa.MetaData)
def discover(metadata):
metadata.reflect(views=metadata.bind.dialect.supports_views)
pairs = []
for name, table in sorted(metadata.tables.items(), key=first):
try:
pairs.append([name, discover(table)])
except sa.exc.CompileError as e:
print("Can not discover type of table %s.\n" % name +
"SQLAlchemy provided this error message:\n\t%s" % e.message +
"\nSkipping.")
except NotImplementedError as e:
print("Blaze does not understand a SQLAlchemy type.\n"
"Blaze provided the following error:\n\t%s" % e.message +
"\nSkipping.")
return DataShape(Record(pairs))
@discover.register(sa.engine.RowProxy)
def discover_row_proxy(rp):
return Record(list(zip(rp.keys(), map(discover, rp.values()))))
def dshape_to_table(name, ds, metadata=None):
"""
Create a SQLAlchemy table from a datashape and a name
>>> dshape_to_table('bank', '{name: string, amount: int}') # doctest: +NORMALIZE_WHITESPACE
Table('bank', MetaData(bind=None),
Column('name', Text(), table=<bank>, nullable=False),
Column('amount', Integer(), table=<bank>, nullable=False),
schema=None)
"""
if isinstance(ds, str):
ds = dshape(ds)
metadata = metadata or sa.MetaData()
cols = dshape_to_alchemy(ds)
return sa.Table(name, metadata, *cols)
@dispatch(object, str)
def create_from_datashape(o, ds, **kwargs):
return create_from_datashape(o, dshape(ds), **kwargs)
@dispatch(sa.engine.base.Engine, DataShape)
def create_from_datashape(engine, ds, **kwargs):
assert isrecord(ds)
metadata = metadata_of_engine(engine)
for name, sub_ds in ds[0].dict.items():
t = dshape_to_table(name, sub_ds, metadata=metadata)
t.create()
return engine
def dshape_to_alchemy(dshape):
"""
>>> dshape_to_alchemy('int')
<class 'sqlalchemy.sql.sqltypes.Integer'>
>>> dshape_to_alchemy('string')
<class 'sqlalchemy.sql.sqltypes.Text'>
>>> dshape_to_alchemy('{name: string, amount: int}')
[Column('name', Text(), table=None, nullable=False), Column('amount', Integer(), table=None, nullable=False)]
>>> dshape_to_alchemy('{name: ?string, amount: ?int}')
[Column('name', Text(), table=None), Column('amount', Integer(), table=None)]
"""
if isinstance(dshape, str):
dshape = datashape.dshape(dshape)
if isinstance(dshape, Option):
return dshape_to_alchemy(dshape.ty)
if str(dshape) in types:
return types[str(dshape)]
if isinstance(dshape, datashape.Record):
return [sa.Column(name,
dshape_to_alchemy(typ),
nullable=isinstance(typ[0], Option))
for name, typ in dshape.parameters[0]]
if isinstance(dshape, datashape.DataShape):
if isdimension(dshape[0]):
return dshape_to_alchemy(dshape[1])
else:
return dshape_to_alchemy(dshape[0])
if isinstance(dshape, datashape.String):
if dshape[0].fixlen is None:
return sa.types.Text
if 'U' in dshape.encoding:
return sa.types.Unicode(length=dshape[0].fixlen)
if 'A' in dshape.encoding:
return sa.types.String(length=dshape[0].fixlen)
if isinstance(dshape, datashape.DateTime):
if dshape.tz:
return sa.types.DateTime(timezone=True)
else:
return sa.types.DateTime(timezone=False)
raise NotImplementedError("No SQLAlchemy dtype match for datashape: %s"
% dshape)
@convert.register(Iterator, sa.Table, cost=300.0)
def sql_to_iterator(t, **kwargs):
engine = t.bind
with engine.connect() as conn:
result = conn.execute(sa.sql.select([t]))
for item in result:
yield item
@convert.register(Iterator, sa.sql.Select, cost=300.0)
def select_to_iterator(sel, dshape=None, **kwargs):
engine = sel.bind # TODO: get engine from select
with engine.connect() as conn:
result = conn.execute(sel)
if dshape and isscalar(dshape.measure):
result = pluck(0, result)
else:
result = map(tuple, result) # Turn RowProxy into tuple
for item in result:
yield item
@convert.register(base, sa.sql.Select, cost=300.0)
def select_to_base(sel, dshape=None, **kwargs):
engine = sel.bind # TODO: get engine from select
with engine.connect() as conn:
result = conn.execute(sel)
assert not dshape or isscalar(dshape)
result = list(result)[0][0]
return result
@append.register(sa.Table, Iterator)
def append_iterator_to_table(t, rows, dshape=None, **kwargs):
assert not isinstance(t, type)
rows = iter(rows)
# We see if the sequence is of tuples or dicts
# If tuples then we coerce them to dicts
try:
row = next(rows)
except StopIteration:
return
rows = chain([row], rows)
if isinstance(row, (tuple, list)):
if dshape and isinstance(dshape.measure, datashape.Record):
names = dshape.measure.names
if not set(names) == set(discover(t).measure.names):
raise ValueError("Column names of incoming data don't match "
"column names of existing SQL table\n"
"Names in SQL table: %s\n"
"Names from incoming data: %s\n" %
(discover(t).measure.names, names))
else:
names = discover(t).measure.names
rows = (dict(zip(names, row)) for row in rows)
engine = t.bind
with engine.connect() as conn:
for chunk in partition_all(1000, rows): # TODO: 1000 is hardcoded
conn.execute(t.insert(), chunk)
return t
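# Hedged usage sketch (``bank`` is a hypothetical bound sa.Table with columns
# name/amount): tuples are zipped against the discovered column names and
# inserted in batches of 1000 rows.
#
#     >>> append(bank, [('Alice', 100), ('Bob', 200)])  # doctest: +SKIP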
@append.register(sa.Table, Chunks)
def append_anything_to_sql_Table(t, c, **kwargs):
for item in c:
append(t, item, **kwargs)
@append.register(sa.Table, object)
def append_anything_to_sql_Table(t, o, **kwargs):
return append(t, convert(Iterator, o, **kwargs), **kwargs)
@append.register(sa.Table, sa.sql.Select)
def append_select_statement_to_sql_Table(t, o, **kwargs):
assert o.bind.has_table(t.name), 'tables must come from the same database'
query = t.insert().from_select(o.columns.keys(), o)
with o.bind.connect() as conn:
conn.execute(query)
return t
@resource.register(r'(.*sql.*|oracle|redshift)(\+\w+)?://.+')
def resource_sql(uri, *args, **kwargs):
kwargs2 = keyfilter(keywords(sa.create_engine).__contains__, kwargs)
engine = create_engine(uri, **kwargs2)
ds = kwargs.get('dshape')
if args and isinstance(args[0], str):
table_name, args = args[0], args[1:]
metadata = metadata_of_engine(engine)
metadata.reflect(views=engine.dialect.supports_views)
if table_name not in metadata.tables:
if ds:
t = dshape_to_table(table_name, ds, metadata)
t.create()
return t
else:
raise ValueError("Table does not exist and no dshape provided")
return metadata.tables[table_name]
if ds:
create_from_datashape(engine, ds)
return engine
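# Hedged usage sketch for the registration above: passing a table name as the
# first positional argument either reflects an existing table or, when a
# ``dshape`` keyword is given, creates it (``accounts`` is a made-up name).
#
#     >>> t = resource_sql('sqlite:///:memory:', 'accounts',
#     ...                  dshape='var * {name: string, amount: int}')  # doctest: +SKIP
#     >>> t.name                                                        # doctest: +SKIP
#     'accounts'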
@resource.register('impala://.+')
def resource_impala(uri, *args, **kwargs):
try:
import impala.sqlalchemy
except ImportError:
raise ImportError("Please install or update `impyla` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('monetdb://.+')
def resource_monet(uri, *args, **kwargs):
try:
import monetdb
except ImportError:
raise ImportError("Please install the `sqlalchemy_monetdb` library")
return resource_sql(uri, *args, **kwargs)
@resource.register('hive://.+')
def resource_hive(uri, *args, **kwargs):
try:
import pyhive
except ImportError:
raise ImportError("Please install the `PyHive` library.")
    pattern = r'hive://((?P<user>[a-zA-Z_]\w*)@)?(?P<host>[\w.]+)(:(?P<port>\d*))?(/(?P<database>\w*))?'
d = re.search(pattern, uri.split('::')[0]).groupdict()
defaults = {'port': '10000',
'user': 'hdfs',
'database': 'default'}
for k, v in d.items():
if not v:
d[k] = defaults[k]
if d['user']:
d['user'] = d['user'] + '@'
uri2 = 'hive://%(user)s%(host)s:%(port)s/%(database)s' % d
if '::' in uri:
uri2 = uri2 + '::' + uri.split('::')[1]
return resource_sql(uri2, *args, **kwargs)
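# Worked example of the normalization above (hostname is hypothetical):
# 'hive://hivehost' has no user, port, or database, so the defaults kick in
# and the URI handed to resource_sql becomes
# 'hive://hdfs@hivehost:10000/default'; any '::tablename' suffix is
# re-appended unchanged.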
ooc_types.add(sa.Table)
@dispatch(sa.Table)
def drop(table):
table.drop(table.bind)
| {
"repo_name": "mrocklin/into",
"path": "into/backends/sql.py",
"copies": "1",
"size": "12558",
"license": "bsd-3-clause",
"hash": 8705936787350652000,
"line_mean": 31.2827763496,
"line_max": 113,
"alpha_frac": 0.6148272018,
"autogenerated": false,
"ratio": 3.5757403189066057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46905675207066055,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
import stat
import sys
from subprocess import Popen, check_output, PIPE, STDOUT, CalledProcessError
import os
from conda_build.os_utils.pyldd import inspect_rpaths
from conda_build import utils
from itertools import islice
from conda_build.os_utils.external import find_preferably_prefixed_executable
NO_EXT = (
'.py', '.pyc', '.pyo', '.h', '.a', '.c', '.txt', '.html',
'.xml', '.png', '.jpg', '.gif', '.class',
)
MAGIC = {
b'\xca\xfe\xba\xbe': 'MachO-universal',
b'\xce\xfa\xed\xfe': 'MachO-i386',
b'\xcf\xfa\xed\xfe': 'MachO-x86_64',
b'\xfe\xed\xfa\xce': 'MachO-ppc',
b'\xfe\xed\xfa\xcf': 'MachO-ppc64',
}
FILETYPE = {
1: 'MH_OBJECT',
2: 'MH_EXECUTE',
3: 'MH_FVMLIB',
4: 'MH_CORE',
5: 'MH_PRELOAD',
6: 'MH_DYLIB',
7: 'MH_DYLINKER',
8: 'MH_BUNDLE',
9: 'MH_DYLIB_STUB',
10: 'MH_DSYM',
11: 'MH_KEXT_BUNDLE',
}
def is_macho(path):
if path.endswith(NO_EXT) or os.path.islink(path) or not os.path.isfile(path):
return False
with open(path, 'rb') as fi:
head = fi.read(4)
return bool(head in MAGIC)
def is_dylib(path, build_prefix):
    return human_filetype(path, build_prefix) == 'DYLIB'
def human_filetype(path, build_prefix):
otool = find_apple_cctools_executable('otool', build_prefix)
output = check_output((otool, '-h', path)).decode('utf-8')
lines = output.splitlines()
if not lines[0].startswith((path, 'Mach header')):
raise ValueError(
'Expected `otool -h` output to start with'
' Mach header or {0}, got:\n{1}'.format(path, output)
)
assert lines[0].startswith((path, 'Mach header')), path
for line in lines:
if line.strip().startswith('0x'):
header = line.split()
filetype = int(header[4])
return FILETYPE[filetype][3:]
def is_dylib_info(lines):
dylib_info = ('LC_ID_DYLIB', 'LC_LOAD_DYLIB')
if len(lines) > 1 and lines[1].split()[1] in dylib_info:
return True
return False
def is_id_dylib(lines):
if len(lines) > 1 and lines[1].split()[1] == 'LC_ID_DYLIB':
return True
return False
def is_load_dylib(lines):
if len(lines) > 1 and lines[1].split()[1] == 'LC_LOAD_DYLIB':
return True
return False
def is_rpath(lines):
if len(lines) > 1 and lines[1].split()[1] == 'LC_RPATH':
return True
return False
def _get_load_commands(lines):
"""yields each load command from the output of otool -l"""
a = 1 # first line is the filename.
for ln, line in enumerate(lines):
if line.startswith("Load command"):
if a < ln:
yield lines[a:ln]
a = ln
yield lines[a:]
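# Illustrative trace (otool output is abridged and hypothetical): for input
# lines like
#
#     ['/path/to/libfoo.dylib:',
#      'Load command 0', '      cmd LC_SEGMENT_64', '  cmdsize 72',
#      'Load command 1', '      cmd LC_ID_DYLIB', '  cmdsize 48']
#
# the generator skips the filename line (index 0) and yields one list per
# record: lines[1:4] for 'Load command 0' and lines[4:] for 'Load command 1'.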
def _get_matching_load_commands(lines, cb_filter):
"""Workhorse function for otool
Does the work of filtering load commands and making a list
of dicts. The logic for splitting the free-form lines into
    keys and values is entirely encoded here. Values that can
be converted to ints are converted to ints.
"""
result = []
for lcmds in _get_load_commands(lines):
if cb_filter(lcmds):
lcdict = {}
for line in islice(lcmds, 1, len(lcmds)):
listy = line.split()
# This could be prettier, but what we need it to handle
# is fairly simple so let's just hardcode it for speed.
if len(listy) == 2:
key, value = listy
elif listy[0] == 'name' or listy[0] == 'path':
# Create an entry for 'name offset' if there is one
# as that can be useful if we need to know if there
# is space to patch it for relocation purposes.
if listy[2] == '(offset':
key = listy[0] + ' offset'
value = int(listy[3][:-1])
lcdict[key] = value
key, value = listy[0:2]
elif listy[0] == 'time':
key = ' '.join(listy[0:3])
value = ' '.join(listy[3:])
elif listy[0] in ('current', 'compatibility'):
key = ' '.join(listy[0:2])
value = listy[2]
try:
value = int(value)
except ValueError:
pass
lcdict[key] = value
result.append(lcdict)
return result
def find_apple_cctools_executable(name, build_prefix, nofail=False):
tools = find_preferably_prefixed_executable(name, build_prefix, all_matches=True)
for tool in tools:
try:
if '/usr/bin' in tool:
with open(tool, 'rb') as f:
s = f.read()
if s.find(b'usr/lib/libxcselect.dylib') != -1:
# We ask xcrun.
tool_xcr = check_output(['xcrun', '-find', name], stderr=STDOUT).decode('utf-8').splitlines()[0]
# print("WARNING :: Found `{}` but is is an Apple Xcode stub executable.".format(tool))
# This is not the real tool, but Apple's irritating GUI dialog launcher.
tool = tool_xcr
if os.path.exists(tool):
return tool
except Exception as _: # noqa
print("ERROR :: Failed to run `{}`. Please use `conda` to install `cctools` into your base environment.\n"
" An alternative option for users of macOS is to install `Xcode` or `Command Line Tools for Xcode`."
.format(tool))
sys.exit(1)
return tool
def otool(path, build_prefix=None, cb_filter=is_dylib_info):
"""A wrapper around otool -l
Parse the output of the otool -l 'load commands', filtered by
    cb_filter, returning a list of dictionaries for the records.
cb_filter receives the whole load command record, including the
first line, the 'Load Command N' one. All the records have been
pre-stripped of white space.
    The output of otool -l is entirely freeform; delineation between
    key and value doesn't formally exist, so it is hard-coded here.
    Regexes are deliberately avoided for speed.
Any key values that can be converted to integers are converted
to integers, the rest are strings.
"""
otool = find_apple_cctools_executable('otool', build_prefix)
lines = check_output([otool, '-l', path],
stderr=STDOUT).decode('utf-8')
# llvm-objdump returns 0 for some things that are anything but successful completion.
lines_split = lines.splitlines()
# 'invalid', 'expected' and 'unexpected' are too generic
# here so also check that we do not get 'useful' output.
if len(lines_split) < 10 and (re.match('.*(is not a Mach-O|invalid|expected|unexpected).*',
lines, re.MULTILINE)):
        raise CalledProcessError(-1, [otool, '-l', path], output=lines)
return _get_matching_load_commands(lines_split, cb_filter)
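# Hedged sketch of the return value with the default is_dylib_info filter
# (field values are illustrative, not captured output):
#
#     [{'cmd': 'LC_LOAD_DYLIB',
#       'cmdsize': 56,
#       'name': '/usr/lib/libSystem.B.dylib',
#       'name offset': 24,
#       'current version': '1252.0.0',
#       'compatibility version': '1.0.0'}]
#
# Keys come straight from the otool field names; values that look like
# integers (cmdsize, offsets) are converted to int.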
def get_dylibs(path, build_prefix=None):
"""Return a list of the loaded dylib pathnames"""
dylib_loads = otool(path, build_prefix, is_load_dylib)
return [dylib_load['name'] for dylib_load in dylib_loads]
def get_id(path, build_prefix=None):
"""Returns the id name of the Mach-O file `path` or an empty string"""
dylib_loads = otool(path, build_prefix, is_id_dylib)
try:
return [dylib_load['name'] for dylib_load in dylib_loads][0]
    except (IndexError, KeyError):
return ''
def get_rpaths(path):
"""Return a list of the dylib rpaths"""
res_pyldd = inspect_rpaths(path, resolve_dirnames=False, use_os_varnames=True)
return res_pyldd
def _chmod(filename, mode):
try:
os.chmod(filename, mode)
except (OSError, utils.PermissionError) as e:
log = utils.get_logger(__name__)
log.warn(str(e))
def install_name_tool(args, build_prefix=None, verbose=False):
args_full = [find_apple_cctools_executable('install_name_tool', build_prefix)]
args_full.extend(args)
if verbose:
print(' '.join(args_full))
old_mode = stat.S_IMODE(os.stat(args[-1]).st_mode)
new_mode = old_mode | stat.S_IWUSR
if old_mode != new_mode:
_chmod(args[-1], new_mode)
subproc = Popen(args_full, stdout=PIPE, stderr=PIPE)
out, err = subproc.communicate()
out = out.decode('utf-8')
err = err.decode('utf-8')
if old_mode != new_mode:
_chmod(args[-1], old_mode)
return subproc.returncode, out, err
def add_rpath(path, rpath, build_prefix=None, verbose=False):
"""Add an `rpath` to the Mach-O file at `path`"""
args = ['-add_rpath', rpath, path]
code, _, stderr = install_name_tool(args, build_prefix)
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s\n" % path)
return
elif "would duplicate path, file already has LC_RPATH for:" in stderr:
print("Skipping -add_rpath, file already has LC_RPATH set")
return
else:
print(stderr, file=sys.stderr)
if code:
raise RuntimeError("install_name_tool failed with exit status %d"
% code)
def delete_rpath(path, rpath, verbose=False):
"""Delete an `rpath` from the Mach-O file at `path`"""
args = ['-delete_rpath', rpath, path]
code, _, stderr = install_name_tool(args)
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s\n" % path)
return
elif "no LC_RPATH load command with path:" in stderr:
print("Skipping -delete_rpath, file doesn't contain that LC_RPATH")
return
else:
print(stderr, file=sys.stderr)
if code:
raise RuntimeError("install_name_tool failed with exit status %d"
% code)
def install_name_change(path, build_prefix, cb_func, dylibs, verbose=False):
"""Change dynamic shared library load name or id name of Mach-O Binary `path`.
`cb_func` is called for each shared library load command. The dictionary of
the load command is passed in and the callback returns the new name or None
if the name should be unchanged.
When dealing with id load commands, `install_name_tool -id` is used.
When dealing with dylib load commands `install_name_tool -change` is used.
"""
changes = []
for index, dylib in enumerate(dylibs):
new_name = cb_func(path, dylib)
if new_name:
changes.append((index, new_name))
ret = True
for index, new_name in changes:
args = []
if dylibs[index]['cmd'] == 'LC_ID_DYLIB':
args.extend(('-id', new_name, path))
else:
args.extend(('-change', dylibs[index]['name'], new_name, path))
code, _, stderr = install_name_tool(args, build_prefix)
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s" % path)
ret = False
continue
else:
print(stderr, file=sys.stderr)
if code:
raise RuntimeError("install_name_tool failed with exit status %d, stderr of:\n%s"
% (code, stderr))
return ret
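# Hedged sketch of a cb_func for install_name_change (all names are made up):
# the callback gets the binary path and one load-command dict and returns the
# replacement install name, or None to leave that entry alone.
#
#     def _rewrite_prefix(binary_path, dylib_record):
#         name = dylib_record.get('name', '')
#         if name.startswith('/old/prefix/lib/'):
#             return name.replace('/old/prefix/lib/', '@rpath/')
#         return None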
if __name__ == '__main__':
if sys.platform == 'darwin':
for path in '/bin/ls', '/etc/locate.rc':
print(path, is_macho(path))
| {
"repo_name": "pelson/conda-build",
"path": "conda_build/os_utils/macho.py",
"copies": "1",
"size": "11541",
"license": "bsd-3-clause",
"hash": 1794661026006849800,
"line_mean": 34.7306501548,
"line_max": 126,
"alpha_frac": 0.5898968894,
"autogenerated": false,
"ratio": 3.604309806371018,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9688135659460811,
"avg_score": 0.0012142072620413282,
"num_lines": 323
} |
from __future__ import absolute_import, division, print_function
import re
import string
from conda_build import exceptions
from conda_build.utils import comma_join
allowed_license_families = """
AGPL
LGPL
GPL3
GPL2
GPL
BSD
MIT
APACHE
PSF
CC
MOZILLA
PUBLIC-DOMAIN
PROPRIETARY
OTHER
NONE
""".split()
# regular expressions
gpl2_regex = re.compile('GPL[^3]*2') # match GPL2
gpl3_regex = re.compile('GPL[^2]*3') # match GPL3
gpl23_regex = re.compile('GPL[^2]*>= *2') # match GPL >= 2
cc_regex = re.compile(r'CC\w+')  # match CC
punk_regex = re.compile('[%s]' % re.escape(string.punctuation))  # matches punctuation characters
def match_gpl3(family):
"""True if family matches GPL3 or GPL >= 2, else False"""
return (gpl23_regex.search(family) or
gpl3_regex.search(family))
def normalize(s):
"""Set to ALL CAPS, replace common GPL patterns, and strip"""
s = s.upper()
s = re.sub('GENERAL PUBLIC LICENSE', 'GPL', s)
s = re.sub('LESSER *', 'L', s)
s = re.sub('AFFERO *', 'A', s)
return s.strip()
def remove_special_characters(s):
"""Remove punctuation, spaces, tabs, and line feeds"""
s = punk_regex.sub(' ', s)
    s = re.sub(r'\s+', '', s)
return s
def guess_license_family_from_index(index=None,
recognized=allowed_license_families):
"""Return best guess of license_family from the conda package index.
Note: Logic here is simple, and focuses on existing set of allowed families
"""
if isinstance(index, dict):
license_name = index.get('license_family', index.get('license'))
else: # index argument is actually a string
license_name = index
return guess_license_family(license_name, recognized)
def guess_license_family(license_name=None,
recognized=allowed_license_families):
"""Return best guess of license_family from the conda package index.
Note: Logic here is simple, and focuses on existing set of allowed families
"""
if license_name is None:
return 'NONE'
license_name = normalize(license_name)
# Handle GPL families as special cases
# Remove AGPL and LGPL before looking for GPL2 and GPL3
    sans_lgpl = re.sub('[AL]GPL', '', license_name)
if match_gpl3(sans_lgpl):
return 'GPL3'
elif gpl2_regex.search(sans_lgpl):
return 'GPL2'
elif cc_regex.search(license_name):
return 'CC'
license_name = remove_special_characters(license_name)
for family in recognized:
if remove_special_characters(family) in license_name:
return family
for family in recognized:
if license_name in remove_special_characters(family):
return family
return 'OTHER'
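# A few hand-traced examples of the heuristic above (illustrative, not an
# exhaustive test of the regexes):
#
#     >>> guess_license_family('LGPL-3.0')                       # doctest: +SKIP
#     'LGPL'
#     >>> guess_license_family('GNU General Public License v3')  # doctest: +SKIP
#     'GPL3'
#     >>> guess_license_family(None)                             # doctest: +SKIP
#     'NONE'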
def ensure_valid_license_family(meta):
try:
license_family = meta['about']['license_family']
except KeyError:
return
allowed_families = [remove_special_characters(normalize(fam))
for fam in allowed_license_families]
if remove_special_characters(normalize(license_family)) not in allowed_families:
raise RuntimeError(exceptions.indent(
"about/license_family '%s' not allowed. Allowed families are %s." %
(license_family, comma_join(sorted(allowed_license_families)))))
| {
"repo_name": "pelson/conda-build",
"path": "conda_build/license_family.py",
"copies": "3",
"size": "3300",
"license": "bsd-3-clause",
"hash": -6495837257669316000,
"line_mean": 28.203539823,
"line_max": 84,
"alpha_frac": 0.65,
"autogenerated": false,
"ratio": 3.642384105960265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008525815140890712,
"num_lines": 113
} |
from __future__ import absolute_import, division, print_function
import re
import subprocess
import json
from os.path import join
from conda.install import rm_rf
from conda.utils import memoized
from conda_build import post
from conda_build.config import config
from conda_build.build import create_env
LDD_RE = re.compile(r'\s*(.*?)\s*=>\s*(.*?)\s*\(.*\)')
LDD_NOT_FOUND_RE = re.compile(r'\s*(.*?)\s*=>\s*not found')
def ldd(path):
"thin wrapper around ldd"
lines = subprocess.check_output(['ldd', path]).decode('utf-8').splitlines()
res = []
for line in lines:
if '=>' not in line:
continue
assert line[0] == '\t', (path, line)
m = LDD_RE.match(line)
if m:
res.append(m.groups())
continue
m = LDD_NOT_FOUND_RE.match(line)
if m:
res.append((m.group(1), 'not found'))
continue
if 'ld-linux' in line:
continue
raise RuntimeError("Unexpected output from ldd: %s" % line)
return res
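# Sketch of the parsed output: a line such as
#     '\tlibz.so.1 => /lib/x86_64-linux-gnu/libz.so.1 (0x00007f...)'
# becomes ('libz.so.1', '/lib/x86_64-linux-gnu/libz.so.1'), while an
# unresolved library like '\tlibmissing.so => not found' becomes
# ('libmissing.so', 'not found'). Lines without '=>' are skipped entirely.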
@memoized
def get_package_linkages(dist, prefix):
with open(join(prefix, 'conda-meta', dist +
'.json')) as f:
data = json.load(f)
res = {}
files = data['files']
for f in files:
path = join(prefix, f)
if post.is_obj(path):
res[f] = ldd(path)
return res
| {
"repo_name": "takluyver/conda-build",
"path": "conda_build/ldd.py",
"copies": "1",
"size": "1363",
"license": "bsd-3-clause",
"hash": 8212627274698076000,
"line_mean": 23.7818181818,
"line_max": 79,
"alpha_frac": 0.5766691123,
"autogenerated": false,
"ratio": 3.340686274509804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9394628114082532,
"avg_score": 0.004545454545454545,
"num_lines": 55
} |
from __future__ import absolute_import, division, print_function
import re
import textwrap
from distutils.version import LooseVersion
from collections import Iterator
import sys
import traceback
from contextlib import contextmanager
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.api.types import is_categorical_dtype, is_scalar
try:
from pandas.api.types import is_datetime64tz_dtype
except ImportError:
# pandas < 0.19.2
from pandas.core.common import is_datetime64tz_dtype
from ..core import get_deps
from ..local import get_sync
PANDAS_VERSION = LooseVersion(pd.__version__)
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if is_categorical_dtype(index):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[:indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i]: indices[i + 1]]
yield df.iloc[indices[-1]:]
_META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional"
_META_DESCRIPTION = """\
An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and
column names of the output. This metadata is necessary for many algorithms
in dask dataframe to work. For ease of use, some alternative inputs are
also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``
or iterable of ``(name, dtype)`` can be provided. Instead of a series, a
tuple of ``(name, dtype)`` can be used. If not provided, dask will try to
infer the metadata. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
"""
def insert_meta_param_description(*args, **kwargs):
"""Replace `$META` in docstring with param description.
If pad keyword is provided, will pad description by that number of
spaces (default is 8)."""
if not args:
return lambda f: insert_meta_param_description(f, **kwargs)
f = args[0]
indent = " " * kwargs.get('pad', 8)
body = textwrap.wrap(_META_DESCRIPTION, initial_indent=indent,
subsequent_indent=indent, width=78)
descr = '{0}\n{1}'.format(_META_TYPES, '\n'.join(body))
if f.__doc__:
if '$META' in f.__doc__:
f.__doc__ = f.__doc__.replace('$META', descr)
else:
# Put it at the end of the parameters section
parameter_header = 'Parameters\n%s----------' % indent[4:]
first, last = re.split('Parameters\\n[ ]*----------', f.__doc__)
parameters, rest = last.split('\n\n', 1)
f.__doc__ = '{0}{1}{2}\n{3}{4}\n\n{5}'.format(first, parameter_header,
parameters, indent[4:],
descr, rest)
return f
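# Hedged usage sketch: the decorator replaces a literal ``$META`` marker in a
# docstring with the shared parameter description (``my_func`` is
# hypothetical):
#
#     @insert_meta_param_description
#     def my_func(df, meta=None):
#         """Do something to ``df``.
#
#         Parameters
#         ----------
#         df : dask.dataframe.DataFrame
#         $META
#         """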
@contextmanager
def raise_on_meta_error(funcname=None):
"""Reraise errors in this block to show metadata inference failure.
Parameters
----------
funcname : str, optional
If provided, will be added to the error message to indicate the
name of the method that failed.
"""
try:
yield
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
msg = ("Metadata inference failed{0}.\n\n"
"Original error is below:\n"
"------------------------\n"
"{1}\n\n"
"Traceback:\n"
"---------\n"
"{2}"
).format(" in `{0}`".format(funcname) if funcname else "",
repr(e), tb)
raise ValueError(msg)
UNKNOWN_CATEGORIES = '__UNKNOWN_CATEGORIES__'
def has_known_categories(x):
"""Returns whether the categories in `x` are known.
Parameters
----------
x : Series or CategoricalIndex
"""
x = getattr(x, '_meta', x)
if isinstance(x, pd.Series):
return UNKNOWN_CATEGORIES not in x.cat.categories
elif isinstance(x, pd.CategoricalIndex):
return UNKNOWN_CATEGORIES not in x.categories
raise TypeError("Expected Series or CategoricalIndex")
def strip_unknown_categories(x):
"""Replace any unknown categoricals with empty categoricals.
Useful for preventing ``UNKNOWN_CATEGORIES`` from leaking into results.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
cat_mask = x.dtypes == 'category'
if cat_mask.any():
cats = cat_mask[cat_mask].index
for c in cats:
if not has_known_categories(x[c]):
x[c].cat.set_categories([], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype) and not has_known_categories(x):
x.cat.set_categories([], inplace=True)
if (isinstance(x.index, pd.CategoricalIndex) and not
has_known_categories(x.index)):
x.index = x.index.set_categories([])
elif isinstance(x, pd.CategoricalIndex) and not has_known_categories(x):
x = x.set_categories([])
return x
def clear_known_categories(x, cols=None, index=True):
"""Set categories to be unknown.
Parameters
----------
x : DataFrame, Series, Index
cols : iterable, optional
If x is a DataFrame, set only categoricals in these columns to unknown.
By default, all categorical columns are set to unknown categoricals
index : bool, optional
If True and x is a Series or DataFrame, set the clear known categories
in the index as well.
"""
if isinstance(x, (pd.Series, pd.DataFrame)):
x = x.copy()
if isinstance(x, pd.DataFrame):
mask = x.dtypes == 'category'
if cols is None:
cols = mask[mask].index
elif not mask.loc[cols].all():
raise ValueError("Not all columns are categoricals")
for c in cols:
x[c].cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
elif isinstance(x, pd.Series):
if is_categorical_dtype(x.dtype):
x.cat.set_categories([UNKNOWN_CATEGORIES], inplace=True)
if index and isinstance(x.index, pd.CategoricalIndex):
x.index = x.index.set_categories([UNKNOWN_CATEGORIES])
elif isinstance(x, pd.CategoricalIndex):
x = x.set_categories([UNKNOWN_CATEGORIES])
return x
def _empty_series(name, dtype, index=None):
if isinstance(dtype, str) and dtype == 'category':
return pd.Series(pd.Categorical([UNKNOWN_CATEGORIES]),
name=name, index=index).iloc[:0]
return pd.Series([], dtype=dtype, name=name, index=index)
def make_meta(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')])
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8'))
Series([], Name: a, dtype: float64)
>>> make_meta('i8')
1
"""
if hasattr(x, '_meta'):
return x._meta
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.iloc[0:0]
elif isinstance(x, pd.Index):
return x[0:0]
index = index if index is None else index[0:0]
if isinstance(x, dict):
return pd.DataFrame({c: _empty_series(c, d, index=index)
for (c, d) in x.items()}, index=index)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError("Expected iterable of tuples of (name, dtype), "
"got {0}".format(x))
return pd.DataFrame({c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x], index=index)
elif not hasattr(x, 'dtype') and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
        # because it is implicitly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except:
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in (pd.Int64Index, pd.Float64Index):
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(['a', 'b'], name=idx.name)
elif typ is pd.DatetimeIndex:
start = '1970-01-01'
data = [start, start] if idx.freq is None else None
return pd.DatetimeIndex(data, start=start, periods=2, freq=idx.freq,
tz=idx.tz, name=idx.name)
elif typ is pd.PeriodIndex:
return pd.PeriodIndex(start='1970-01-01', periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, 'D')
data = [start, start] if idx.freq is None else None
return pd.TimedeltaIndex(data, start=start, periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.CategoricalIndex:
if len(idx.categories):
data = [idx.categories[0]] * 2
cats = idx.categories
else:
data = _nonempty_index(idx.categories)
cats = None
return pd.CategoricalIndex(data, categories=cats,
ordered=idx.ordered, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(i) for i in idx.levels]
labels = [[0, 0] for i in idx.levels]
return pd.MultiIndex(levels=levels, labels=labels, names=idx.names)
raise TypeError("Don't know how to handle index of "
"type {0}".format(type(idx).__name__))
_simple_fake_mapping = {
'b': np.bool_(True),
'V': np.void(b' '),
'M': np.datetime64('1970-01-01'),
'm': np.timedelta64(1),
'S': np.str_('foo'),
'a': np.str_('foo'),
'U': np.unicode_('foo'),
'O': 'foo'
}
def _scalar_from_dtype(dtype):
if dtype.kind in ('i', 'f', 'u'):
return dtype.type(1)
elif dtype.kind == 'c':
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ('m', 'M') else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
def _nonempty_scalar(x):
if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)):
return x
elif np.isscalar(x):
dtype = x.dtype if hasattr(x, 'dtype') else np.dtype(type(x))
return _scalar_from_dtype(dtype)
else:
raise TypeError("Can't handle meta of type "
"'{0}'".format(type(x).__name__))
def _nonempty_series(s, idx):
dtype = s.dtype
if is_datetime64tz_dtype(dtype):
entry = pd.Timestamp('1970-01-01', tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = None
data = pd.Categorical(data, categories=cats,
ordered=s.cat.ordered)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
return pd.Series(data, name=s.name, index=idx)
def meta_nonempty(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if isinstance(x, pd.Index):
return _nonempty_index(x)
elif isinstance(x, pd.Series):
idx = _nonempty_index(x.index)
return _nonempty_series(x, idx)
elif isinstance(x, pd.DataFrame):
idx = _nonempty_index(x.index)
data = {i: _nonempty_series(x.iloc[:, i], idx)
for i, c in enumerate(x.columns)}
res = pd.DataFrame(data, index=idx,
columns=np.arange(len(x.columns)))
res.columns = x.columns
return res
elif is_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError("Expected Index, Series, DataFrame, or scalar, "
"got {0}".format(type(x).__name__))
###############################################################
# Testing
###############################################################
def _check_dask(dsk, check_names=True, check_dtypes=True, result=None):
import dask.dataframe as dd
if hasattr(dsk, 'dask'):
if result is None:
result = dsk.compute(get=get_sync)
if isinstance(dsk, dd.Index):
assert isinstance(result, pd.Index), type(result)
assert isinstance(dsk._meta, pd.Index), type(dsk._meta)
if check_names:
assert dsk.name == result.name
assert dsk._meta.name == result.name
if isinstance(result, pd.MultiIndex):
assert result.names == dsk._meta.names
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.Series):
assert isinstance(result, pd.Series), type(result)
assert isinstance(dsk._meta, pd.Series), type(dsk._meta)
if check_names:
assert dsk.name == result.name, (dsk.name, result.name)
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
_check_dask(dsk.index, check_names=check_names,
check_dtypes=check_dtypes, result=result.index)
elif isinstance(dsk, dd.DataFrame):
assert isinstance(result, pd.DataFrame), type(result)
assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
assert isinstance(dsk._meta, pd.DataFrame), type(dsk._meta)
if check_names:
tm.assert_index_equal(dsk.columns, result.columns)
tm.assert_index_equal(dsk._meta.columns, result.columns)
if check_dtypes:
assert_dask_dtypes(dsk, result)
_check_dask(dsk.index, check_names=check_names,
check_dtypes=check_dtypes, result=result.index)
elif isinstance(dsk, dd.core.Scalar):
assert (np.isscalar(result) or
isinstance(result, (pd.Timestamp, pd.Timedelta)))
if check_dtypes:
assert_dask_dtypes(dsk, result)
else:
msg = 'Unsupported dask instance {0} found'.format(type(dsk))
raise AssertionError(msg)
return result
return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if isinstance(a, pd.DataFrame):
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def assert_eq(a, b, check_names=True, check_dtypes=True,
check_divisions=True, check_index=True, **kwargs):
if check_divisions:
assert_divisions(a)
assert_divisions(b)
if hasattr(a, 'divisions') and hasattr(b, 'divisions'):
at = type(np.asarray(a.divisions).tolist()[0]) # numpy to python
bt = type(np.asarray(b.divisions).tolist()[0]) # scalar conversion
assert at == bt, (at, bt)
assert_sane_keynames(a)
assert_sane_keynames(b)
a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)
b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)
if not check_index:
a = a.reset_index(drop=True)
b = b.reset_index(drop=True)
if isinstance(a, pd.DataFrame):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_frame_equal(a, b, **kwargs)
elif isinstance(a, pd.Series):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_series_equal(a, b, check_names=check_names, **kwargs)
elif isinstance(a, pd.Index):
tm.assert_index_equal(a, b, **kwargs)
else:
if a == b:
return True
else:
if np.isnan(a):
assert np.isnan(b)
else:
assert np.allclose(a, b)
return True
def assert_dask_graph(dask, label):
if hasattr(dask, 'dask'):
dask = dask.dask
assert isinstance(dask, dict)
for k in dask:
if isinstance(k, tuple):
k = k[0]
if k.startswith(label):
return True
else:
msg = "given dask graph doesn't contan label: {0}"
raise AssertionError(msg.format(label))
def assert_divisions(ddf):
if not hasattr(ddf, 'divisions'):
return
if not hasattr(ddf, 'index'):
return
if not ddf.known_divisions:
return
index = lambda x: x if isinstance(x, pd.Index) else x.index
results = get_sync(ddf.dask, ddf._keys())
for i, df in enumerate(results[:-1]):
if len(df):
assert index(df).min() >= ddf.divisions[i]
assert index(df).max() < ddf.divisions[i + 1]
if len(results[-1]):
assert index(results[-1]).min() >= ddf.divisions[-2]
assert index(results[-1]).max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
if not hasattr(ddf, 'dask'):
return
for k in ddf.dask.keys():
while isinstance(k, tuple):
k = k[0]
assert isinstance(k, (str, bytes))
assert len(k) < 100
assert ' ' not in k
if sys.version_info[0] >= 3:
assert k.split('-')[0].isidentifier()
def assert_dask_dtypes(ddf, res, numeric_equal=True):
"""Check that the dask metadata matches the result.
If `numeric_equal`, integer and floating dtypes compare equal. This is
useful due to the implicit conversion of integer to floating upon
encountering missingness, which is hard to infer statically."""
eq_types = {'O', 'S', 'U', 'a'} # treat object and strings alike
if numeric_equal:
eq_types.update(('i', 'f'))
if isinstance(res, pd.DataFrame):
for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes],
axis=1).itertuples():
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
elif isinstance(res, (pd.Series, pd.Index)):
a = ddf._meta.dtype
b = res.dtype
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
else:
if hasattr(ddf._meta, 'dtype'):
a = ddf._meta.dtype
if not hasattr(res, 'dtype'):
assert np.isscalar(res)
b = np.dtype(type(res))
else:
b = res.dtype
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
else:
assert type(ddf._meta) == type(res)
def assert_max_deps(x, n, eq=True):
dependencies, dependents = get_deps(x.dask)
if eq:
assert max(map(len, dependencies.values())) == n
else:
assert max(map(len, dependencies.values())) <= n
| {
"repo_name": "mraspaud/dask",
"path": "dask/dataframe/utils.py",
"copies": "1",
"size": "21094",
"license": "bsd-3-clause",
"hash": 1418236844497588200,
"line_mean": 33.8661157025,
"line_max": 82,
"alpha_frac": 0.5669858728,
"autogenerated": false,
"ratio": 3.6755532322704303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.474253910507043,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
import traceback
import warnings
from datetime import datetime
from functools import partial
import numpy as np
import pandas as pd
from ..core.common import contains_cftime_datetimes
from ..core import indexing
from ..core.formatting import first_n_items, format_timestamp, last_item
from ..core.options import OPTIONS
from ..core.pycompat import PY3
from ..core.variable import Variable
from .variables import (
SerializationWarning, VariableCoder, lazy_elemwise_func, pop_to,
safe_setitem, unpack_for_decoding, unpack_for_encoding)
try:
from pandas.errors import OutOfBoundsDatetime
except ImportError:
# pandas < 0.20
from pandas.tslib import OutOfBoundsDatetime
# standard calendars recognized by cftime
_STANDARD_CALENDARS = set(['standard', 'gregorian', 'proleptic_gregorian'])
_NS_PER_TIME_DELTA = {'us': int(1e3),
'ms': int(1e6),
's': int(1e9),
'm': int(1e9) * 60,
'h': int(1e9) * 60 * 60,
'D': int(1e9) * 60 * 60 * 24}
TIME_UNITS = frozenset(['days', 'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'])
def _import_cftime():
'''
    Helper function to handle the transition to netcdftime/cftime
as a stand-alone package
'''
try:
import cftime
except ImportError:
# in netCDF4 the num2date/date2num function are top-level api
try:
import netCDF4 as cftime
except ImportError:
raise ImportError("Failed to import cftime")
return cftime
def _require_standalone_cftime():
"""Raises an ImportError if the standalone cftime is not found"""
try:
import cftime # noqa: F401
except ImportError:
raise ImportError('Using a CFTimeIndex requires the standalone '
'version of the cftime library.')
def _netcdf_to_numpy_timeunit(units):
units = units.lower()
if not units.endswith('s'):
units = '%ss' % units
return {'microseconds': 'us', 'milliseconds': 'ms', 'seconds': 's',
'minutes': 'm', 'hours': 'h', 'days': 'D'}[units]
def _unpack_netcdf_time_units(units):
# CF datetime units follow the format: "UNIT since DATE"
# this parses out the unit and date allowing for extraneous
# whitespace.
matches = re.match('(.+) since (.+)', units)
if not matches:
raise ValueError('invalid time units: %s' % units)
delta_units, ref_date = [s.strip() for s in matches.groups()]
return delta_units, ref_date
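# For example, _unpack_netcdf_time_units('days since 1970-01-01 00:00:00')
# returns ('days', '1970-01-01 00:00:00'); a units string without ' since '
# raises ValueError.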
def _decode_datetime_with_cftime(num_dates, units, calendar,
enable_cftimeindex):
cftime = _import_cftime()
if enable_cftimeindex:
_require_standalone_cftime()
dates = np.asarray(cftime.num2date(num_dates, units, calendar,
only_use_cftime_datetimes=True))
else:
dates = np.asarray(cftime.num2date(num_dates, units, calendar))
if (dates[np.nanargmin(num_dates)].year < 1678 or
dates[np.nanargmax(num_dates)].year >= 2262):
if not enable_cftimeindex or calendar in _STANDARD_CALENDARS:
warnings.warn(
'Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using dummy '
'cftime.datetime objects instead, reason: dates out '
'of range', SerializationWarning, stacklevel=3)
else:
if enable_cftimeindex:
if calendar in _STANDARD_CALENDARS:
dates = cftime_to_nptime(dates)
else:
try:
dates = cftime_to_nptime(dates)
except ValueError as e:
warnings.warn(
'Unable to decode time axis into full '
'numpy.datetime64 objects, continuing using '
'dummy cftime.datetime objects instead, reason:'
'{0}'.format(e), SerializationWarning, stacklevel=3)
return dates
def _decode_cf_datetime_dtype(data, units, calendar, enable_cftimeindex):
# Verify that at least the first and last date can be decoded
# successfully. Otherwise, tracebacks end up swallowed by
# Dataset.__repr__ when users try to view their lazily decoded array.
values = indexing.ImplicitToExplicitIndexingAdapter(
indexing.as_indexable(data))
example_value = np.concatenate([first_n_items(values, 1) or [0],
last_item(values) or [0]])
try:
result = decode_cf_datetime(example_value, units, calendar,
enable_cftimeindex)
except Exception:
calendar_msg = ('the default calendar' if calendar is None
else 'calendar %r' % calendar)
msg = ('unable to decode time units %r with %s. Try '
'opening your dataset with decode_times=False.'
% (units, calendar_msg))
if not PY3:
msg += ' Full traceback:\n' + traceback.format_exc()
raise ValueError(msg)
else:
dtype = getattr(result, 'dtype', np.dtype('object'))
return dtype
def decode_cf_datetime(num_dates, units, calendar=None,
enable_cftimeindex=False):
"""Given an array of numeric dates in netCDF format, convert it into a
numpy array of date time objects.
For standard (Gregorian) calendars, this function uses vectorized
operations, which makes it much faster than cftime.num2date. In such a
case, the returned array will be of type np.datetime64.
Note that time unit in `units` must not be smaller than microseconds and
not larger than days.
See also
--------
cftime.num2date
"""
num_dates = np.asarray(num_dates)
flat_num_dates = num_dates.ravel()
if calendar is None:
calendar = 'standard'
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using cftime
raise OutOfBoundsDatetime
# fixes: https://github.com/pydata/pandas/issues/14068
        # these lines check whether the lowest or the highest value in dates
        # causes an OutOfBoundsDatetime (Overflow) error
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date
# Cast input dates to integers of nanoseconds because `pd.to_datetime`
# works much faster when dealing with integers
# make _NS_PER_TIME_DELTA an array to ensure type upcasting
flat_num_dates_ns_int = (flat_num_dates.astype(np.float64) *
_NS_PER_TIME_DELTA[delta]).astype(np.int64)
dates = (pd.to_timedelta(flat_num_dates_ns_int, 'ns') +
ref_date).values
except (OutOfBoundsDatetime, OverflowError):
dates = _decode_datetime_with_cftime(
flat_num_dates.astype(np.float), units, calendar,
enable_cftimeindex)
return dates.reshape(num_dates.shape)
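# Hedged example of the fast standard-calendar path (repr abridged):
#
#     >>> decode_cf_datetime([0, 1], 'days since 2000-01-01')  # doctest: +SKIP
#     array(['2000-01-01T00:00:00...', '2000-01-02T00:00:00...'],
#           dtype='datetime64[ns]')
#
# Non-standard calendars or out-of-range dates fall back to cftime and come
# back as an object array of cftime.datetime instances.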
def decode_cf_timedelta(num_timedeltas, units):
"""Given an array of numeric timedeltas in netCDF format, convert it into a
numpy timedelta64[ns] array.
"""
num_timedeltas = np.asarray(num_timedeltas)
units = _netcdf_to_numpy_timeunit(units)
shape = num_timedeltas.shape
num_timedeltas = num_timedeltas.ravel()
result = pd.to_timedelta(num_timedeltas, unit=units, box=False)
# NaT is returned unboxed with wrong units; this should be fixed in pandas
if result.dtype != 'timedelta64[ns]':
result = result.astype('timedelta64[ns]')
return result.reshape(shape)
def _infer_time_units_from_diff(unique_timedeltas):
for time_unit in ['days', 'hours', 'minutes', 'seconds']:
delta_ns = _NS_PER_TIME_DELTA[_netcdf_to_numpy_timeunit(time_unit)]
unit_delta = np.timedelta64(delta_ns, 'ns')
diffs = unique_timedeltas / unit_delta
if np.all(diffs == diffs.astype(int)):
return time_unit
return 'seconds'
def infer_calendar_name(dates):
"""Given an array of datetimes, infer the CF calendar name"""
if np.asarray(dates).dtype == 'datetime64[ns]':
return 'proleptic_gregorian'
else:
return np.asarray(dates).ravel()[0].calendar
def infer_datetime_units(dates):
"""Given an array of datetimes, returns a CF compatible time-unit string of
the form "{time_unit} since {date[0]}", where `time_unit` is 'days',
'hours', 'minutes' or 'seconds' (the first one that can evenly divide all
unique time deltas in `dates`)
"""
dates = np.asarray(dates).ravel()
if np.asarray(dates).dtype == 'datetime64[ns]':
dates = pd.to_datetime(dates, box=False)
dates = dates[pd.notnull(dates)]
reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
reference_date = pd.Timestamp(reference_date)
else:
reference_date = dates[0] if len(dates) > 0 else '1970-01-01'
reference_date = format_cftime_datetime(reference_date)
unique_timedeltas = np.unique(np.diff(dates))
if unique_timedeltas.dtype == np.dtype('O'):
# Convert to np.timedelta64 objects using pandas to work around a
# NumPy casting bug: https://github.com/numpy/numpy/issues/11096
unique_timedeltas = pd.to_timedelta(unique_timedeltas, box=False)
units = _infer_time_units_from_diff(unique_timedeltas)
return '%s since %s' % (units, reference_date)
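# Illustrative result (hand-traced): a datetime64 array of consecutive days
# starting at 2000-01-01 yields 'days since 2000-01-01 00:00:00', because one
# day evenly divides every unique delta and the first date is the reference.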
def format_cftime_datetime(date):
"""Converts a cftime.datetime object to a string with the format:
YYYY-MM-DD HH:MM:SS.UUUUUU
"""
return '{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:06d}'.format(
date.year, date.month, date.day, date.hour, date.minute, date.second,
date.microsecond)
def infer_timedelta_units(deltas):
"""Given an array of timedeltas, returns a CF compatible time-unit from
    {'days', 'hours', 'minutes', 'seconds'} (the first one that can evenly
divide all unique time deltas in `deltas`)
"""
deltas = pd.to_timedelta(np.asarray(deltas).ravel(), box=False)
unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])
units = _infer_time_units_from_diff(unique_timedeltas)
return units
def cftime_to_nptime(times):
"""Given an array of cftime.datetime objects, return an array of
numpy.datetime64 objects of the same size"""
times = np.asarray(times)
new = np.empty(times.shape, dtype='M8[ns]')
for i, t in np.ndenumerate(times):
dt = datetime(t.year, t.month, t.day, t.hour, t.minute, t.second)
new[i] = np.datetime64(dt)
return new
def _cleanup_netcdf_time_units(units):
delta, ref_date = _unpack_netcdf_time_units(units)
try:
units = '%s since %s' % (delta, format_timestamp(ref_date))
except OutOfBoundsDatetime:
# don't worry about reifying the units if they're out of bounds
pass
return units
def _encode_datetime_with_cftime(dates, units, calendar):
"""Fallback method for encoding dates using cftime.
This method is more flexible than xarray's parsing using datetime64[ns]
arrays but also slower because it loops over each element.
"""
cftime = _import_cftime()
if np.issubdtype(dates.dtype, np.datetime64):
# numpy's broken datetime conversion only works for us precision
dates = dates.astype('M8[us]').astype(datetime)
def encode_datetime(d):
return np.nan if d is None else cftime.date2num(d, units, calendar)
return np.vectorize(encode_datetime)(dates)
def cast_to_int_if_safe(num):
int_num = np.array(num, dtype=np.int64)
if (num == int_num).all():
num = int_num
return num
def encode_cf_datetime(dates, units=None, calendar=None):
"""Given an array of datetime objects, returns the tuple `(num, units,
calendar)` suitable for a CF compliant time variable.
Unlike `date2num`, this function can handle datetime64 arrays.
See also
--------
cftime.date2num
"""
dates = np.asarray(dates)
if units is None:
units = infer_datetime_units(dates)
else:
units = _cleanup_netcdf_time_units(units)
if calendar is None:
calendar = infer_calendar_name(dates)
delta, ref_date = _unpack_netcdf_time_units(units)
try:
if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == 'O':
# parse with cftime instead
raise OutOfBoundsDatetime
assert dates.dtype == 'datetime64[ns]'
delta_units = _netcdf_to_numpy_timeunit(delta)
time_delta = np.timedelta64(1, delta_units).astype('timedelta64[ns]')
ref_date = np.datetime64(pd.Timestamp(ref_date))
num = (dates - ref_date) / time_delta
except (OutOfBoundsDatetime, OverflowError):
num = _encode_datetime_with_cftime(dates, units, calendar)
num = cast_to_int_if_safe(num)
return (num, units, calendar)
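# Hedged sketch of the inferred-units path:
#
#     >>> encode_cf_datetime(pd.to_datetime(['2000-01-01', '2000-01-02']))  # doctest: +SKIP
#     (array([0, 1]), 'days since 2000-01-01 00:00:00', 'proleptic_gregorian')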
def encode_cf_timedelta(timedeltas, units=None):
if units is None:
units = infer_timedelta_units(timedeltas)
np_unit = _netcdf_to_numpy_timeunit(units)
num = 1.0 * timedeltas / np.timedelta64(1, np_unit)
num = np.where(pd.isnull(timedeltas), np.nan, num)
num = cast_to_int_if_safe(num)
return (num, units)
class CFDatetimeCoder(VariableCoder):
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
if (np.issubdtype(data.dtype, np.datetime64) or
contains_cftime_datetimes(variable)):
(data, units, calendar) = encode_cf_datetime(
data,
encoding.pop('units', None),
encoding.pop('calendar', None))
safe_setitem(attrs, 'units', units, name=name)
safe_setitem(attrs, 'calendar', calendar, name=name)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
enable_cftimeindex = OPTIONS['enable_cftimeindex']
if 'units' in attrs and 'since' in attrs['units']:
units = pop_to(attrs, encoding, 'units')
calendar = pop_to(attrs, encoding, 'calendar')
dtype = _decode_cf_datetime_dtype(
data, units, calendar, enable_cftimeindex)
transform = partial(
decode_cf_datetime, units=units, calendar=calendar,
enable_cftimeindex=enable_cftimeindex)
data = lazy_elemwise_func(data, transform, dtype)
return Variable(dims, data, attrs, encoding)
class CFTimedeltaCoder(VariableCoder):
def encode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_encoding(variable)
if np.issubdtype(data.dtype, np.timedelta64):
data, units = encode_cf_timedelta(
data, encoding.pop('units', None))
safe_setitem(attrs, 'units', units, name=name)
return Variable(dims, data, attrs, encoding)
def decode(self, variable, name=None):
dims, data, attrs, encoding = unpack_for_decoding(variable)
if 'units' in attrs and attrs['units'] in TIME_UNITS:
units = pop_to(attrs, encoding, 'units')
transform = partial(decode_cf_timedelta, units=units)
dtype = np.dtype('timedelta64[ns]')
data = lazy_elemwise_func(data, transform, dtype=dtype)
return Variable(dims, data, attrs, encoding)
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/coding/times.py",
"copies": "1",
"size": "15932",
"license": "apache-2.0",
"hash": -8077839839982989000,
"line_mean": 35.7096774194,
"line_max": 79,
"alpha_frac": 0.6324378609,
"autogenerated": false,
"ratio": 3.776250296278739,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4908688157178739,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from .core import common_subexpression
from .expressions import Expr
from .reductions import Reduction, Summary, summary
from ..dispatch import dispatch
from .expressions import dshape_method_list
from datashape import dshape, Record, Map, Unit, var
__all__ = ['by', 'By', 'count_values']
def _names_and_types(expr):
schema = expr.dshape.measure
schema = getattr(schema, 'ty', schema)
if isinstance(schema, Record):
return schema.names, schema.types
if isinstance(schema, Unit):
return [expr._name], [expr.dshape.measure]
if isinstance(schema, Map):
return [expr._name], [expr.dshape.measure.key]
raise ValueError("Unable to determine name and type of %s" % expr)
class By(Expr):
""" Split-Apply-Combine Operator
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = by(t['name'], total=t['amount'].sum())
>>> data = [['Alice', 100, 1],
... ['Bob', 200, 2],
... ['Alice', 50, 3]]
>>> from blaze.compute.python import compute
>>> sorted(compute(e, data))
[('Alice', 150), ('Bob', 200)]
"""
_arguments = 'grouper', 'apply'
@property
def _child(self):
return common_subexpression(self.grouper, self.apply)
def _schema(self):
grouper_names, grouper_types = _names_and_types(self.grouper)
apply_names, apply_types = _names_and_types(self.apply)
names = grouper_names + apply_names
types = grouper_types + apply_types
return dshape(Record(list(zip(names, types))))
def _dshape(self):
# TODO: think if this should be generalized
return var * self.schema
def __str__(self):
return '%s(%s, %s)' % (type(self).__name__.lower(),
self.grouper,
re.sub(r'^summary\((.*)\)$', r'\1',
str(self.apply)))
@dispatch(Expr, Reduction)
def by(grouper, s):
raise ValueError("This syntax has been removed.\n"
"Please name reductions with keyword arguments.\n"
"Before: by(t.name, t.amount.sum())\n"
"After: by(t.name, total=t.amount.sum())")
@dispatch(Expr, Summary)
def by(grouper, s):
return By(grouper, s)
@dispatch(Expr)
def by(grouper, **kwargs):
return By(grouper, summary(**kwargs))
def count_values(expr, sort=True):
"""
Count occurrences of elements in this column
Sort by counts by default
Add ``sort=False`` keyword to avoid this behavior.
"""
result = by(expr, count=expr.count())
if sort:
result = result.sort('count', ascending=False)
return result
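# A minimal usage sketch (not part of the original module; names are
# illustrative only). It only builds the expression; computing it needs a
# backend, as in the ``By`` docstring above.
def _example_count_values():
    from blaze import symbol
    t = symbol('t', 'var * {name: string, amount: int}')
    expr = count_values(t.name)
    return expr.fields  # ['name', 'count']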
dshape_method_list.extend([
(lambda ds: len(ds.shape) == 1, set([count_values])),
])
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/expr/split_apply_combine.py",
"copies": "3",
"size": "2935",
"license": "bsd-3-clause",
"hash": -4449423408555156500,
"line_mean": 26.6886792453,
"line_max": 71,
"alpha_frac": 0.5884156729,
"autogenerated": false,
"ratio": 3.655043586550436,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 106
} |
from __future__ import absolute_import, division, print_function
import re
from .data import ComponentID
from .subset import Subset, SubsetState
from .component_link import ComponentLink
TAG_RE = re.compile('\{\s*(?P<tag>\S+)\s*\}')
__all__ = ['ParsedCommand', 'ParsedSubsetState']
def _ensure_only_component_references(cmd, references):
""" Search through tag references in a command, ensure that
they all reference ComponentIDs
Parameters
----------
cmd : string. A template command
    references : a mapping from tags to substitution objects
Raises
------
TypeError, if cmd does not refer only to ComponentIDs
"""
for match in TAG_RE.finditer(cmd):
tag = match.group('tag')
if tag not in references or not \
isinstance(references[tag], ComponentID):
raise TypeError(
"Reference to %s, which is not a ComponentID" % tag)
def _reference_list(cmd, references):
""" Return a list of the values in the references mapping whose
keys appear in the command
Parameters
----------
cmd : string. A template command
references : a mapping from tags to substitution objects
Returns
-------
A list of the unique values in references that appear in the command
Examples
--------
>>> cmd = '{g} - {r} + {g}'
>>> references = {'g' : g_object, 'r' : r_object, 'i' : i_object}
>>> _reference_list(cmd, references)
[g_object, r_object]
Raises
------
KeyError: if tags in the command aren't in the reference mapping
"""
try:
return list(set(references[m.group('tag')]
for m in TAG_RE.finditer(cmd)))
except KeyError:
raise KeyError("Tags from command not in reference mapping")
def _dereference(cmd, references):
""" Dereference references in the template command, to refer
to objects in the reference mapping
Parameters
----------
cmd : Command string
references : mapping from template tags to objects
Returns
-------
    A new command, where all the tags have been substituted as follows:
"{tag}" -> 'data[references["tag"], __view]', if references[tag] is a ComponentID
"{tag}" -> 'references["tag"].to_mask(__view)' if references[tag] is a Subset
__view is a placeholder variable referencing the view
passed to data.__getitem__ and subset.to_mask
Raises
------
TypeError, if a tag in the command maps to something other than
a ComponentID or Subset object
"""
def sub_func(match):
tag = match.group('tag')
if isinstance(references[tag], ComponentID):
return 'data[references["%s"], __view]' % tag
elif isinstance(references[tag], Subset):
return 'references["%s"].to_mask(__view)' % tag
else:
raise TypeError("Tag %s maps to unrecognized type: %s" %
(tag, type(references[tag])))
return TAG_RE.sub(sub_func, cmd)
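# A minimal sketch of the substitution above (not part of the original
# module; it assumes ComponentID can be built from a bare label, as elsewhere
# in glue): a tag bound to a ComponentID becomes a data[...] lookup.
def _example_dereference():
    cid = ComponentID('x')
    return _dereference('{x} + 1', {'x': cid})
    # -> 'data[references["x"], __view] + 1'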
def _validate(cmd, references):
""" Make sure all references in the command are in the reference mapping
Raises
------
TypeError, if a tag is missing from references
"""
for match in TAG_RE.finditer(cmd):
tag = match.group('tag')
if tag not in references:
raise TypeError("Tag %s not in reference mapping: %s" %
(tag, sorted(references.keys())))
class ParsedCommand(object):
""" Class to manage commands that define new components and subsets """
def __init__(self, cmd, references):
""" Create a new parsed command object
Parameters
----------
cmd : str. A template command. Can only reference ComponentID objects
references : mapping from command templates to substitution objects
"""
_validate(cmd, references)
self._cmd = cmd
self._references = references
def ensure_only_component_references(self):
_ensure_only_component_references(self._cmd, self._references)
@property
def reference_list(self):
return _reference_list(self._cmd, self._references)
def evaluate(self, data, view=None):
from .. import env
# pylint: disable=W0613, W0612
references = self._references
cmd = _dereference(self._cmd, self._references)
scope = vars(env)
scope['__view'] = view
return eval(cmd, vars(env), locals()) # careful!
def __gluestate__(self, context):
return dict(cmd=self._cmd,
references=dict((k, context.id(v))
for k, v in self._references.items()))
@classmethod
def __setgluestate__(cls, rec, context):
cmd = rec['cmd']
ref = dict((k, context.object(v))
for k, v in rec['references'].items())
return cls(cmd, ref)
class ParsedComponentLink(ComponentLink):
""" Class to create a new ComponentLink from a ParsedCommand object. """
def __init__(self, to_, parsed):
""" Create a new link
Parameters
----------
to_ : ComponentID instance to associate with the new component
parsed : A ParsedCommand object
"""
parsed.ensure_only_component_references()
super(ParsedComponentLink, self).__init__(
parsed.reference_list, to_, lambda: None)
self._parsed = parsed
def compute(self, data, view=None):
return self._parsed.evaluate(data, view)
def __gluestate__(self, context):
return dict(parsed=context.do(self._parsed),
to=context.id(self.get_to_id()))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(context.object(rec['to']),
context.object(rec['parsed']))
class ParsedSubsetState(SubsetState):
""" A SubsetState defined by a ParsedCommand object """
def __init__(self, parsed):
""" Create a new object
Parameters
----------
parsed : A ParsedCommand object
"""
super(ParsedSubsetState, self).__init__()
self._parsed = parsed
def to_mask(self, data, view=None):
""" Calculate the new mask by evaluating the dereferenced command """
result = self._parsed.evaluate(data)
if view is not None:
result = result[view]
return result
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/parse.py",
"copies": "1",
"size": "6427",
"license": "bsd-3-clause",
"hash": -5441677174968901000,
"line_mean": 29.7511961722,
"line_max": 87,
"alpha_frac": 0.5991909133,
"autogenerated": false,
"ratio": 4.3543360433604335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006314069399153651,
"num_lines": 209
} |
from __future__ import absolute_import, division, print_function
import re
from ...external.qt.QtGui import QDialog, QMessageBox
from ...external.qt import QtCore
from ... import core
from ...core import parse
from ...utils.qt import CompletionTextEdit
from ..qtutil import load_ui
def disambiguate(label, labels):
""" Changes name of label if it conflicts with labels list
Parameters
----------
label : string
labels : collection of strings
Returns
-------
label, perhaps appended with a suffix "_{number}". The output
does not appear in labels
"""
label = label.replace(' ', '_')
if label not in labels:
return label
suffix = 1
while label + ('_%i' % suffix) in labels:
suffix += 1
return label + ('_%i' % suffix)
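# A minimal sketch of the helper above (not part of the original module):
def _example_disambiguate():
    assert disambiguate('a', ['a', 'a_1']) == 'a_2'
    assert disambiguate('my label', []) == 'my_label'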
class ColorizedCompletionTextEdit(CompletionTextEdit):
def insertPlainText(self, *args):
super(ColorizedCompletionTextEdit, self).insertPlainText(*args)
self.reformat_text()
def keyReleaseEvent(self, event):
super(ColorizedCompletionTextEdit, self).keyReleaseEvent(event)
self.reformat_text()
def reformat_text(self):
# Here every time a key is released, we re-colorize the expression.
# We show valid components in blue, and invalid ones in red. We
        # recognize components because they contain a ":" which is not valid
# Python syntax (except if one considers lambda functions, but we can
# probably ignore that here)
text = self.toPlainText()
# If there are no : in the text we don't need to do anything
        if ":" not in text:
return
pattern = '[^\\s]*:[^\\s]*'
def format_components(m):
component = m.group(0)
if component in self.word_list:
return "<font color='#0072B2'><b>" + component + "</b></font> "
else:
return "<font color='#D55E00'><b>" + component + "</b></font> "
html = re.sub(pattern, format_components, text)
tc = self.textCursor()
pos = tc.position()
self.setHtml(html)
# Sometimes the HTML gets rid of double spaces so we have to make
# sure the position isn't greater than the text length.
text = self.toPlainText()
pos = min(pos, len(text))
tc.setPosition(pos)
self.setTextCursor(tc)
self.setAlignment(QtCore.Qt.AlignCenter)
class CustomComponentWidget(object):
"""
Dialog to add derived components to data via parsed commands.
"""
def __init__(self, collection, parent=None):
# Load in ui file to set up widget
self.ui = load_ui('custom_component_widget', parent)
# In the ui file we do not create the text field for the expression
# because we want to use a custom widget that supports auto-complete.
self.ui.expression = ColorizedCompletionTextEdit()
self.ui.verticalLayout_3.addWidget(self.ui.expression)
self.ui.expression.setAlignment(QtCore.Qt.AlignCenter)
self.ui.expression.setObjectName("expression")
self.ui.expression.setToolTip("Define a new component. You can either "
"type out the full name of a component\n"
"with the data:component syntax, or "
"start typing and press TAB to use "
"tab-completion.\n Blue-colored "
"components are valid, while "
"Red-colored components are invalid.")
self._labels = {}
self._data = {}
self._collection = collection
self._gather_components()
self._gather_data()
self._init_widgets()
self._connect()
# Set up auto-completion. While the auto-complete window is open, we
# cannot add/remove datasets or other components, so we can populate
# the auto_completer straight off.
self.ui.expression.set_word_list(list(self._labels.keys()))
def _connect(self):
cl = self.ui.component_list
cl.itemDoubleClicked.connect(self._add_to_expression)
def _init_widgets(self):
""" Set up default state of widget """
comps = self.ui.component_list
comps.addItems(sorted(self._labels.keys()))
data = self.ui.data_list
data.addItems(sorted(self._data.keys()))
def _gather_components(self):
""" Build a mapping from unique labels -> componentIDs """
comps = set()
for data in self._collection:
for c in data.components:
if c in comps:
continue
label = "%s:%s" % (data.label, c)
label = disambiguate(label, self._labels)
self._labels[label] = c
comps.add(c)
def _gather_data(self):
""" Build a mapping from unique labels -> data objects """
for data in self._collection:
label = data.label
label = disambiguate(label, self._data)
self._data[label] = data
def _selected_data(self):
""" Yield all data objects that are selected in the DataList """
for items in self.ui.data_list.selectedItems():
yield self._data[str(items.text())]
def _create_link(self):
""" Create a ComponentLink form the state of the GUI
Returns
-------
A new component link
"""
expression = str(self.ui.expression.toPlainText())
# To maintain backward compatibility with previous versions of glue,
# we add curly brackets around the components in the expression.
pattern = '[^\\s]*:[^\\s]*'
def add_curly(m):
return "{" + m.group(0) + "}"
expression = re.sub(pattern, add_curly, expression)
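        # e.g. "data:x + data:y" becomes "{data:x} + {data:y}" before parsing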
pc = parse.ParsedCommand(expression, self._labels)
label = str(self.ui.new_label.text()) or 'new component'
new_id = core.data.ComponentID(label)
link = parse.ParsedComponentLink(new_id, pc)
return link
@property
def _number_targets(self):
"""
How many targets are selected
"""
return len(self.ui.data_list.selectedItems())
def _add_link_to_targets(self, link):
""" Add a link to all the selected data """
for target in self._selected_data():
target.add_component_link(link)
def _add_to_expression(self, item):
""" Add a component list item to the expression editor """
addition = '%s ' % item.text()
expression = self.ui.expression
expression.insertPlainText(addition)
@staticmethod
def create_component(collection):
"""Present user with a dialog to define and add new components.
Parameters
----------
collection : A `DataCollection` to edit
"""
# pylint: disable=W0212
widget = CustomComponentWidget(collection)
while True:
widget.ui.show()
if widget.ui.exec_() == QDialog.Accepted:
if len(str(widget.ui.expression.toPlainText())) == 0:
QMessageBox.critical(widget.ui, "Error", "No expression set",
buttons=QMessageBox.Ok)
elif widget._number_targets == 0:
QMessageBox.critical(widget.ui, "Error", "Please specify the target dataset(s)",
buttons=QMessageBox.Ok)
elif len(widget.ui.new_label.text()) == 0:
QMessageBox.critical(widget.ui, "Error", "Please specify the new component name",
buttons=QMessageBox.Ok)
else:
link = widget._create_link()
if link:
widget._add_link_to_targets(link)
break
else:
break
def main():
from glue.core.data import Data
from glue.core.data_collection import DataCollection
import numpy as np
x = np.random.random((5, 5))
y = x * 3
data = DataCollection(Data(label='test', x=x, y=y))
CustomComponentWidget.create_component(data)
for d in data:
print(d.label)
for c in d.components:
print('\t%s' % c)
if __name__ == "__main__":
main()
| {
"repo_name": "JudoWill/glue",
"path": "glue/qt/widgets/custom_component_widget.py",
"copies": "1",
"size": "8477",
"license": "bsd-3-clause",
"hash": -8110531840612001000,
"line_mean": 33.3198380567,
"line_max": 101,
"alpha_frac": 0.5727262003,
"autogenerated": false,
"ratio": 4.362840967575914,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5435567167875914,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from imps.rebuilders import does_line_have_hash_noqa, Rebuilder, sortable_key
from imps.strings import get_doc_string
IMPORT_LINE = r'^import\s.*'
FROM_IMPORT_LINE = r'^from\s.*import\s.*'
FROM_IMPORT_LINE_WITH_PARAN = r'^from\s.*import\s.*\('
# We do sorting here early for a single line with multiple imports.
def split_imports(s):
from_part, import_list = re.split(r'\s*import\s+', s, 1)
if '#' in import_list:
search_to = import_list.find('#')
else:
search_to = len(import_list)
ending = import_list[search_to:]
if ending:
ending = " " + ending
if from_part:
from_part += " "
imps = import_list[0:search_to].split(',')
imps = sorted(set([i.strip() for i in imps if i.strip()]), key=sortable_key)
return from_part + "import " + ', '.join(imps) + ending
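# A worked example of the helper above (not part of the original module; it
# assumes sortable_key orders names alphabetically, as in imps.rebuilders):
def _example_split_imports():
    # imports are de-duplicated and sorted; a trailing comment is preserved
    assert split_imports('from foo import b, a  # note') == 'from foo import a, b # note'
    assert split_imports('import zlib, json') == 'import json, zlib'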
def is_line_an_import(l):
return re.match(FROM_IMPORT_LINE, l) or re.match(IMPORT_LINE, l)
def _is_there_no_close_paran(l):
return ')' not in l or ('#' in l and l.find('#') < l.find(')'))
class Sorter():
def __init__(self, type='s', max_line_length=80, local_imports=None, indent=" "):
self.reader = ReadInput(indent)
self.local_imports = local_imports or []
self.builder_object = Rebuilder(type, int(max_line_length), indent)
def sort(self, lines):
self.reader.clean()
self.reader.process_and_split(lines)
return self.builder_object.rebuild(self.local_imports, *self.reader.get_imports_as_dicts())
class ReadInput():
def __init__(self, indent):
self.indent = indent
def clean(self):
self.lines_before_any_imports = None
self.lines_before_import = []
self.pre_import = {}
self.pre_from_import = {}
def _store_line(self, target_map, line):
        # Special case: keep the first comments at the very top of the file (e.g. utf encoding declarations)
if self.lines_before_any_imports is None:
self.lines_before_any_imports = self.lines_before_import
self.lines_before_import = []
target_map[line] = self.lines_before_import
self.lines_before_import = []
def _process_line(self, line):
if does_line_have_hash_noqa(line):
self.lines_before_import.append(line)
elif re.match(IMPORT_LINE, line):
self._store_line(self.pre_import, split_imports(line))
elif re.match(FROM_IMPORT_LINE_WITH_PARAN, line):
self._process_from_paren_block(line)
elif re.match(FROM_IMPORT_LINE, line):
self._store_line(self.pre_from_import, split_imports(line))
else:
self.lines_before_import.append(line)
def _process_from_paren_block(self, line):
"""
        If there are no comments we squash a `from X import (Y, Z)` into `from X import Y, Z`
        by removing the parentheses.
        However, if there are comments we must split them into comments that apply to the
        import on the line below them, and comments on the same line as the import.
Imports are then sorted inside this method to preserve the position of the comments.
"""
if '# noqa' in line:
self._store_line(self.pre_from_import, split_imports(line))
return
if '#' not in line:
line = line.replace('(', '').replace(')', '')
self._store_line(self.pre_from_import, split_imports(line))
return
base = line[0:line.find('(') + 1]
line = line[line.find('(') + 1:line.rfind(')')]
pre_comments = []
pre_imp_comment = {}
same_line_comment = {}
old_import = None
while line:
is_newline = line.find('\n') < line.find('#')
line = line.lstrip()
            # If the next part of line is NOT a comment.
if line.find('#') != 0:
                # line[0:end_marker] is the name of the next import
end_marker = line.find(',')
if end_marker == -1:
end_marker = line.find('#')
if end_marker == -1:
end_marker = line.find('\n')
old_import = line[0:end_marker]
if not old_import:
break
same_line_comment[old_import] = ''
pre_imp_comment[old_import] = pre_comments
pre_comments = []
line = line[end_marker + 1:]
else:
comment = line[line.find('#'):line.find('\n')]
# If the comment is on a newline mark it as a 'pre-import-comment' to go on the line
# above. (or if old_import is None which means this is the first line).
if is_newline or not old_import:
pre_comments.append(comment)
else:
same_line_comment[old_import] = comment
if old_import not in pre_imp_comment:
pre_imp_comment[old_import] = []
line = line[line.find('\n'):]
for i in sorted(same_line_comment.keys(), key=lambda s: s.lower()):
if pre_imp_comment.get(i):
for c in pre_imp_comment[i]:
base += '\n' + self.indent + c
base += '\n' + self.indent + i + ','
if same_line_comment[i]:
base += " " + same_line_comment[i]
# include the last pre import comments - they were at the end
for c in pre_comments:
base += '\n' + self.indent + c
base += '\n)'
self.pre_from_import[base] = self.lines_before_import
self.lines_before_import = []
def process_and_split(self, text):
lines = text.split('\n')
i = -1
while i < len(lines) - 1:
i += 1
data = lines[i]
if is_line_an_import(lines[i]) and '\\' in lines[i] and lines[i].strip()[-1] == '\\':
while '\\' in lines[i] and lines[i].strip()[-1] == '\\' and i < len(lines) - 1:
data = data.strip()[0:-1] + lines[i+1]
i += 1
if re.match(FROM_IMPORT_LINE_WITH_PARAN, data):
while _is_there_no_close_paran(lines[i]) and i < len(lines) - 1:
i += 1
data += '\n' + lines[i]
            # If a doc string was opened but not closed on this line:
doc_string_points = get_doc_string(data)
if len(doc_string_points) % 2 == 1:
giant_comment = doc_string_points[-1][1]
while i < len(lines) - 1:
i += 1
data += '\n' + lines[i]
comment_point = lines[i].find(giant_comment)
if comment_point != -1:
after_comment = lines[i][comment_point + 3:]
doc_string_points = get_doc_string(after_comment)
if len(doc_string_points) % 2 == 0:
break
self._process_line(data)
def get_imports_as_dicts(self):
if self.lines_before_any_imports is None:
self.lines_before_any_imports = []
return self.pre_import, self.pre_from_import, self.lines_before_any_imports, self.lines_before_import
| {
"repo_name": "bootandy/imps",
"path": "imps/core.py",
"copies": "1",
"size": "7371",
"license": "apache-2.0",
"hash": 8002470811603217000,
"line_mean": 36.2272727273,
"line_max": 109,
"alpha_frac": 0.5365622032,
"autogenerated": false,
"ratio": 3.7133501259445842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47499123291445844,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
from pymel.core import createNode, listConnections, connectAttr, PyNode, select, mel
from maya.api import OpenMaya
from maya.cmds import ls
import pdil
from . import controllerShape, cardlister
def makePickwalkNode(ikControls=None, fkControls=None):
'''
node.ikLinker[*].ikController
.ikLeftIndexIk
.ikLeftIndexFk
.ikRightIndexIk
.ikRightIndexFk
'''
ikControls = ikControls if ikControls else []
fkControls = fkControls if fkControls else []
obj = createNode('network')
obj.addAttr('up', at='message')
obj.addAttr('down', at='message')
obj.addAttr('switcher', at='double', dv=0)
mobj = pdil.capi.asMObject(obj)
for xk in ['ik', 'fk']:
cattr = OpenMaya.MFnCompoundAttribute()
mattr = OpenMaya.MFnMessageAttribute()
nattr = OpenMaya.MFnNumericAttribute()
controlLinker = cattr.create(xk + 'Linker', xk)
cattr.array = True
controller = mattr.create(xk + 'Controller', xk + 'ctrl' )
left = mattr.create(xk + 'Left', xk + 'l')
right = mattr.create(xk + 'Right', xk + 'r')
leftIndexIk = nattr.create(xk + 'LeftIndexIk', xk + 'lii', OpenMaya.MFnNumericData.kInt, -1)
leftIndexFk = nattr.create(xk + 'LeftIndexFk', xk + 'lif', OpenMaya.MFnNumericData.kInt, -1)
rightIndexIk = nattr.create(xk + 'RightIndexIk', xk + 'rii', OpenMaya.MFnNumericData.kInt, -1)
rightIndexFk = nattr.create(xk + 'RightIndexFk', xk + 'rif', OpenMaya.MFnNumericData.kInt, -1)
cattr.addChild(controller)
cattr.addChild(left)
cattr.addChild(right)
cattr.addChild(leftIndexIk)
cattr.addChild(leftIndexFk)
cattr.addChild(rightIndexIk)
cattr.addChild(rightIndexFk)
mobj.addAttribute(controlLinker)
for i, ctrl in enumerate(fkControls):
print(ctrl, type(ctrl))
ctrl.message >> obj.fkLinker[i].fkController
for i, ctrl in enumerate(ikControls):
ctrl.message >> obj.ikLinker[i].ikController
return obj
def getIkControlList(ik):
if not ik:
return []
return [ctrl for name, ctrl in ik.subControl.items()] + [ik]
def getFkControlList(fk):
if not fk:
return []
return [fk] + [ctrl for name, ctrl in fk.subControl.items()]
def setSide(srcPicker, sourceXk, destPicker, destXk, side):
src = srcPicker.attr( sourceXk + 'Linker' )
dest = destPicker.attr( destXk + 'Linker' )
srcMax = src.numElements() - 1
destMax = dest.numElements() - 1
targets = [int(round( (i / srcMax) * destMax )) for i in range( srcMax + 1 )]
print(targets)
plug = '{xk}Linker[{{i}}].{xk}{Side}Index{dxk}'.format(xk=sourceXk, dxk=destXk.title(), Side=side)
for i, target in enumerate(targets):
srcPicker.attr( plug.format(i=i) ).set( target )
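# A pure-python sketch of the index mapping computed in setSide above (not
# part of the original module; no Maya objects needed): with 4 source entries
# (srcMax == 3) and 7 destination entries (destMax == 6) the source indices
# spread evenly across the destination list.
def _exampleSideTargets(srcMax=3, destMax=6):
    return [int(round((float(i) / srcMax) * destMax)) for i in range(srcMax + 1)]  # [0, 2, 4, 6]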
def setLeft(srcPicker, destPicker):
setSide(srcPicker, 'ik', destPicker, 'ik', 'Left')
setSide(srcPicker, 'ik', destPicker, 'fk', 'Left')
setSide(srcPicker, 'fk', destPicker, 'ik', 'Left')
setSide(srcPicker, 'fk', destPicker, 'fk', 'Left')
setSide(destPicker, 'ik', srcPicker, 'ik', 'Right')
setSide(destPicker, 'ik', srcPicker, 'fk', 'Right')
setSide(destPicker, 'fk', srcPicker, 'ik', 'Right')
setSide(destPicker, 'fk', srcPicker, 'fk', 'Right')
for plug in srcPicker.ikLinker:
destPicker.message >> plug.ikLeft
for plug in srcPicker.fkLinker:
destPicker.message >> plug.fkLeft
for plug in destPicker.ikLinker:
srcPicker.message >> plug.ikRight
for plug in destPicker.fkLinker:
srcPicker.message >> plug.fkRight
def buildDefaultNetwork():
cardHierarchy = cardlister.cardHierarchy()
for parent, children in cardHierarchy:
#for parent in [PyNode('Rig:Shoulder_card')]:
if parent:
if parent.outputCenter:
ik = getIkControlList(parent.outputCenter.ik)
fk = getFkControlList(parent.outputCenter.fk)
makePickwalkNode(ik, fk)
else:
ikLeft = getIkControlList(parent.outputLeft.ik)
fkLeft = getFkControlList(parent.outputLeft.fk)
pickerLeft = makePickwalkNode(ikLeft, fkLeft)
if ikLeft:
switcher = controllerShape.getSwitcherPlug(ikLeft[0])
connectAttr(switcher, pickerLeft.switcher)
ikRight = getIkControlList(parent.outputRight.ik)
fkRight = getFkControlList(parent.outputRight.fk)
pickerRight = makePickwalkNode(ikRight, fkRight)
if ikRight:
switcher = controllerShape.getSwitcherPlug(ikRight[0])
connectAttr(switcher, pickerRight.switcher)
                if True: # Run default linkage of the sides to each other
setLeft(pickerRight, pickerLeft)
setLeft(pickerLeft, pickerRight)
'''
if ikLeft:
for i, (left, right) in enumerate(zip(pickerLeft.ikLinker, pickerRight.ikLinker ) ):
pickerRight.message >> left.ikLeft
left.ikLeftIndexIk.set( i )
pickerRight.message >> left.ikRight
left.ikRightIndexIk.set( i )
pickerLeft.message >> right.ikRight
right.ikRightIndexIk.set( i )
pickerLeft.message >> right.ikLeft
right.ikLeftIndexIk.set( i )
if fkLeft:
for i, (left, right) in enumerate(zip(pickerLeft.fkLinker, pickerRight.fkLinker ) ):
pickerRight.message >> left.fkLeft
left.fkLeftIndexFk.set( i )
pickerRight.message >> left.fkRight
left.fkRightIndexFk.set( i )
pickerLeft.message >> right.fkRight
right.fkRightIndexFk.set( i )
pickerLeft.message >> right.fkLeft
right.fkLeftIndexFk.set( i )
'''
pickerConnections = set([
'ikLeftIndexIk',
'ikLeftIndexFk',
'ikRightIndexIk',
'ikRightIndexFk',
'fkLeftIndexIk',
'fkLeftIndexFk',
'fkRightIndexIk',
'fkRightIndexFk',
])
def getPickerPlug(ctrl):
''' Return the picker plug this control is connected to.
Example return: 'network27.fkLinker[1].fkController'
'''
for plug in listConnections( ctrl + '.message', p=True, s=False, d=True ):
if 'kLinker[' in str(plug):
return plug
def listCon(plug, fallback):
    ''' Wrap listConnections, returning the first connection or the fallback if there are none
'''
cons = listConnections(plug)
if cons:
return cons[0]
else:
return fallback
def getNextNode(plug, d, fallback):
listElement = plug.getParent()
name = listElement.attrName()
xk = name[:2]
picker = plug.node()
if d == 'down':
count = picker.attr(name).getNumElements()
index = listElement.index()
if index < (count - 1):
index += 1
#plug.name().replace('[%i]' % count)
return listCon( re.sub( r'\[\d+\]', '[%i]' % index, str(plug)), fallback)
#return picker.attr(name)[count].?? .listConections()[0]
else:
return listCon(picker.down, fallback)
elif d == 'up':
count = picker.attr(name).getNumElements()
index = listElement.index()
if index > 0:
index -= 1
return listCon( re.sub( r'\[\d+\]', '[%i]' % index, str(plug)), fallback)
#return picker.attr(name)[count].?? .listConections()[0]
else:
return listCon(picker.up, fallback)
elif d in ('left', 'right'):
side = d.title()
otherPicker = listCon( listElement.attr(xk + side), fallback)
if otherPicker:
if otherPicker.switcher.get() < 0.5:
target_index = listElement.attr( xk + side + 'IndexFk' ).get()
return listCon( otherPicker.fkLinker[ target_index ].fkController, fallback)
else:
target_index = listElement.attr( xk + side + 'IndexIk' ).get()
return listCon( otherPicker.ikLinker[ target_index ].ikController, fallback)
#editMenuUpdate MayaWindow|mainEditMenu;
#string $nameCommandCmd = "nameCommand -ann \"FossilPickWalkUpNameCommand\" -command (\"FossilPickWalkUp\") FossilPickWalkUpNameCommand"; eval($nameCommandCmd);
#// Result: FossilPickWalkUpNameCommand //
def fossilPickWalk(d='down'):
allControls = set( ls('*.fossilCtrlType', o=True, r=True) )
selected = set( ls(sl=True) )
if selected.issubset(allControls):
#maya.cmds.attributeQuery( 'fossil9CtrlType', n='Shoulder_L_ctrl', ex=1 )
pickerPlugs = [getPickerPlug(ctrl) for ctrl in selected]
if all(pickerPlugs):
# All the selection are controls so we can process their special connections
select( [getNextNode(pickerPlug, d, ctrl) for pickerPlug, ctrl in zip(pickerPlugs, selected)] )
return
# Fallback of normal pickwalking
if d == 'down':
mel.eval('pickWalkDown')
elif d == 'up':
mel.eval('pickWalkUp')
elif d == 'left':
mel.eval('pickWalkLeft')
elif d == 'right':
mel.eval('pickWalkRight')
'''
[[None, [nt.Card(u'Pelvis_card')]],
[nt.Card(u'Pelvis_card'), [nt.Card(u'Hips_card'), nt.Card(u'Spine_card')]],
[nt.Card(u'Hips_card'), [nt.Card(u'Hip_card')]],
[nt.Card(u'Hip_card'), [nt.Card(u'Ball_card')]],
[nt.Card(u'Ball_card'), []],
[nt.Card(u'Spine_card'), [nt.Card(u'Clavicle_card'), nt.Card(u'Neck_card')]],
[nt.Card(u'Clavicle_card'), [nt.Card(u'Shoulder_card')]],
[nt.Card(u'Shoulder_card'),
[nt.Card(u'Hand_card'), nt.Card(u'Index_card'), nt.Card(u'Middle_card'), nt.Card(u'Pinky_card'), nt.Card(u'Ring_card'), nt.Card(u'Thumb_card')]],
[nt.Card(u'Hand_card'), []],
[nt.Card(u'Index_card'), []],
[nt.Card(u'Middle_card'), []],
[nt.Card(u'Pinky_card'), []],
[nt.Card(u'Ring_card'), []],
[nt.Card(u'Thumb_card'), []],
[nt.Card(u'Neck_card'), [nt.Card(u'Head_card')]],
[nt.Card(u'Head_card'), []]] #
cards = pdil.tool.fossil.cardlister.cardHierarchy()
for parent, children in cards:
childPickers = []
for child in children:
picker = makePickwalkNode()
childPickers.append(picker)
if parent:
parent.message >> picker.up
#for prev, nxt in zip( childPickers, )
if parent and children:
childPickers[0].message >> parent.down
''' | {
"repo_name": "patcorwin/fossil",
"path": "pdil/tool/fossil/lib/pickwalk.py",
"copies": "1",
"size": "11283",
"license": "bsd-3-clause",
"hash": -2393379568364777000,
"line_mean": 32.7844311377,
"line_max": 160,
"alpha_frac": 0.5692634938,
"autogenerated": false,
"ratio": 3.5149532710280376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45842167648280374,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
hex_pattern = re.compile('[a-f]+')
def key_split(s): # from distributed.utils
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split('-')
result = words[0].lstrip("'(\"")
for word in words[1:]:
if word.isalpha() and not (len(word) == 8 and
hex_pattern.match(word) is not None):
result += '-' + word
else:
break
if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
return 'data'
else:
if result[0] == '<':
result = result.strip('<>').split()[0].split('.')[-1]
return result
except:
return 'Other'
| {
"repo_name": "lensacom/dask.mesos",
"path": "daskos/utils.py",
"copies": "1",
"size": "1380",
"license": "apache-2.0",
"hash": 8209752527384422000,
"line_mean": 25.0377358491,
"line_max": 76,
"alpha_frac": 0.4717391304,
"autogenerated": false,
"ratio": 3.2624113475177303,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9226288842697604,
"avg_score": 0.001572327044025157,
"num_lines": 53
} |
from __future__ import absolute_import, division, print_function
import re
import appr.semver as semver
from appr.exception import Unsupported
from appr.models.kv.models_index_base import ModelsIndexBase
from appr.models.package_base import PackageBase
class PackageKvBase(PackageBase):
index_class = ModelsIndexBase
@property
def index(self):
return self.index_class(self.package)
@classmethod
def _fetch(cls, package, release, media_type='kpm'):
index = cls.index_class(package)
return index.release(release, media_type)
# def channels(self, channel_class, iscurrent=True):
# return self.index.release_channels(self.release)
@classmethod
def all_releases(cls, package, media_type=None):
index = cls.index_class(package)
return index.releases(media_type)
@classmethod
def dump_all(cls, blob_cls):
index = cls.index_class()
result = []
for package_info in index.packages():
package_name = package_info['namespace'] + "/" + package_info['name']
releaseindex = cls.index_class(package_name)
for release in releaseindex.releases():
for _, package_data in releaseindex.release_manifests(release).iteritems():
package_data['channels'] = releaseindex.release_channels(release)
package_data['blob'] = releaseindex.get_blob(package_data['content']['digest'])
result.append(package_data)
return result
@classmethod
def all(cls, namespace=None, media_type=None, search=None, **kwargs):
index = cls.index_class()
result = []
matching = None
if search:
matching = cls.search(search)
for package_data in index.packages(namespace):
namespace, name = package_data['namespace'], package_data['name']
package_name = "%s/%s" % (namespace, name)
if matching is not None and package_name not in matching:
continue
created_at = package_data['created_at']
releaseindex = cls.index_class(package_name)
available_releases = [
str(x)
for x in sorted(
semver.versions(releaseindex.releases(media_type=media_type), False),
reverse=True)
] # yapf: disable
if not available_releases:
continue
if media_type is None:
manifest_list = cls.manifests(package_name, available_releases[0])
else:
manifest_list = [media_type]
view = {
'releases': available_releases,
'default': available_releases[0],
'manifests': manifest_list,
'name': package_name,
'visibility': 'public',
'created_at': created_at}
result.append(view)
return result
@classmethod
def isdeleted_release(cls, package, release):
""" TODO """
return False
def _save(self, force=False, **kwargs):
return self.index.add_release(self.data, self.release, self.media_type, force)
@classmethod
def search(cls, query, **kwargs):
index = cls.index_class()
searchindex = '\n'.join(index.package_names())
return re.findall(r"(.*%s.*)" % query, searchindex)
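    # e.g. with package names ['ns1/redis', 'ns2/postgres'] (hypothetical),
    # search('redis') scans the newline-joined index and returns ['ns1/redis']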
@classmethod
def _delete(cls, package, release, media_type):
index = cls.index_class(package)
return index.delete_release(release, media_type)
@classmethod
def reindex(cls):
raise Unsupported("Reindex is not yet supported")
@classmethod
def manifests(cls, package, release):
index = cls.index_class(package)
return index.release_manifests(release).keys()
| {
"repo_name": "app-registry/appr",
"path": "appr/models/kv/package_kv_base.py",
"copies": "2",
"size": "3858",
"license": "apache-2.0",
"hash": 7007768594427096000,
"line_mean": 33.7567567568,
"line_max": 99,
"alpha_frac": 0.5974598237,
"autogenerated": false,
"ratio": 4.2349066959385295,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5832366519638529,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import re
_pattern_type = type(re.compile(''))
def normalize(r):
"""Normalize a regular expression by ensuring that it is wrapped with:
'^' and '$'
Parameters
----------
r : str or Pattern
The pattern to normalize.
Returns
-------
p : Pattern
The compiled regex.
"""
if isinstance(r, _pattern_type):
r = r.pattern
return re.compile('^' + r.lstrip('^').rstrip('$') + '$')
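# A minimal sketch of the helper above (not part of the original module):
def _example_normalize():
    # strings and compiled patterns are both accepted; the result is always
    # anchored at both ends
    assert normalize(r'\d+').pattern == r'^\d+$'
    assert normalize(re.compile(r'^\d+$')).pattern == r'^\d+$'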
class RegexDispatcher(object):
"""
Regular Expression Dispatcher
>>> f = RegexDispatcher('f')
    >>> @f.register('\d*')
... def parse_int(s):
... return int(s)
    >>> @f.register('\d*\.\d*')
... def parse_float(s):
... return float(s)
Set priorities to break ties between multiple matches.
Default priority is set to 10
    >>> @f.register('\w*', priority=9)
... def parse_str(s):
... return s
>>> type(f('123'))
int
>>> type(f('123.456'))
float
"""
def __init__(self, name):
self.name = name
self.funcs = {}
self.priorities = {}
def add(self, regex, func, priority=10):
self.funcs[normalize(regex)] = func
self.priorities[func] = priority
def register(self, regex, priority=10):
"""Register a new handler in this regex dispatcher.
Parameters
----------
regex : str or Pattern
The pattern to match against.
priority : int, optional
            The priority for this pattern. This is used to resolve ambiguous
matches. The highest priority match wins.
Returns
-------
decorator : callable
A decorator that registers the function with this RegexDispatcher
but otherwise returns the function unchanged.
"""
def _(func):
self.add(regex, func, priority)
return func
return _
def dispatch(self, s):
funcs = (func for r, func in self.funcs.items() if r.match(s))
return max(funcs, key=self.priorities.get)
def __call__(self, s, *args, **kwargs):
return self.dispatch(s)(s, *args, **kwargs)
@property
def __doc__(self):
# take the min to give the docstring of the last fallback function
return min(self.priorities.items(), key=lambda x: x[1])[0].__doc__
| {
"repo_name": "ContinuumIO/odo",
"path": "odo/regex.py",
"copies": "4",
"size": "2416",
"license": "bsd-3-clause",
"hash": -7697779489612305000,
"line_mean": 24.1666666667,
"line_max": 77,
"alpha_frac": 0.5587748344,
"autogenerated": false,
"ratio": 4.158347676419965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6717122510819966,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import requests
from flask import json
import flask
from dynd import nd
from datashape import dshape
from ..data.core import DataDescriptor
from ..data.utils import coerce
from .index import emit_index
# These are a hack for testing
# It's convenient to use requests for live production but use
# flask for testing. Sadly they have different Response objects,
# hence the dispatched functions
def content(response):
if isinstance(response, flask.Response):
return response.data
if isinstance(response, requests.Response):
return response.content
def ok(response):
if isinstance(response, flask.Response):
return 'OK' in response.status
if isinstance(response, requests.Response):
return response.ok
def reason(response):
if isinstance(response, flask.Response):
return response.status
if isinstance(response, requests.Response):
return response.reason
class Client(DataDescriptor):
    __slots__ = 'url', 'name'
def __init__(self, url, name):
self.url = url.strip('/')
self.name = name
def _get_data(self, key):
response = requests.put('%s/data/%s.json' % (self.url, self.name),
data=json.dumps({'index': emit_index(key)}),
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'})
if not ok(response):
raise ValueError("Bad Response: %s" % reason(response))
data = json.loads(content(response))
return data['datashape'], data['data']
def get_py(self, key):
dshape, data = self._get_data(key)
return coerce(dshape, data)
def get_dynd(self, key):
dshape, data = self._get_data(key)
return nd.array(data, type=str(dshape))
@property
def dshape(self):
response = requests.get('%s/datasets.json' % self.url)
if not ok(response):
raise ValueError("Bad Response: %s" % reason(response))
data = json.loads(content(response))
return dshape(data[self.name])
| {
"repo_name": "aterrel/blaze",
"path": "blaze/serve/client.py",
"copies": "1",
"size": "2186",
"license": "bsd-3-clause",
"hash": -4342229812178546000,
"line_mean": 28.9452054795,
"line_max": 78,
"alpha_frac": 0.6285452882,
"autogenerated": false,
"ratio": 4.0783582089552235,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036959564051488865,
"num_lines": 73
} |
from __future__ import absolute_import, division, print_function
import requests
from flask import json
import flask
from dynd import nd
from datashape import dshape
from ..data import DataDescriptor
from ..data.utils import coerce
from ..expr import Expr
from ..dispatch import dispatch
from .index import emit_index
from ..resource import resource
from .server import DEFAULT_PORT
# These are a hack for testing
# It's convenient to use requests for live production but use
# flask for testing. Sadly they have different Response objects,
# hence the dispatched functions
__all__ = 'Client', 'ExprClient'
def content(response):
if isinstance(response, flask.Response):
return response.data
if isinstance(response, requests.Response):
return response.content
def ok(response):
if isinstance(response, flask.Response):
return 'OK' in response.status
if isinstance(response, requests.Response):
return response.ok
def reason(response):
if isinstance(response, flask.Response):
return response.status
if isinstance(response, requests.Response):
return response.reason
class Client(object):
""" Expression Client for Blaze Server
Parameters
----------
url: str
URL of a Blaze server
name: str
Name of dataset on that server
Examples
--------
>>> # This example matches with the docstring of ``Server``
>>> ec = Client('localhost:6363', 'accounts')
>>> t = Data(ec) # doctest: +SKIP
See Also
--------
blaze.server.server.Server
"""
__slots__ = 'url', 'dataname'
def __init__(self, url, name, **kwargs):
url = url.strip('/')
if not url[:4] == 'http':
url = 'http://' + url
self.url = url
self.dataname = name
@property
def dshape(self):
response = requests.get('%s/datasets.json' % self.url)
if not ok(response):
raise ValueError("Bad Response: %s" % reason(response))
data = json.loads(content(response))
return dshape(data[self.dataname])
def ExprClient(*args, **kwargs):
import warnings
warnings.warn("Deprecated use `Client` instead", DeprecationWarning)
return Client(*args, **kwargs)
@dispatch(Client)
def discover(ec):
return ec.dshape
@dispatch(Expr, Client)
def compute_down(expr, ec):
from .server import to_tree
from ..api import Data
from ..api import into
from pandas import DataFrame
tree = to_tree(expr, {expr._leaves()[0]: ec.dataname})
r = requests.get('%s/compute.json' % ec.url,
data = json.dumps({'expr': tree}),
headers={'Content-Type': 'application/json'})
if not ok(r):
raise ValueError("Bad response: %s" % reason(r))
data = json.loads(content(r))
return data['data']
@resource.register('blaze://.+')
def resource_blaze(uri, name, **kwargs):
uri = uri[len('blaze://'):]
sp = uri.split('/')
tld, rest = sp[0], sp[1:]
if ':' not in tld:
tld = tld + ':%d' % DEFAULT_PORT
uri = '/'.join([tld] + list(rest))
return Client(uri, name, **kwargs)
| {
"repo_name": "vitan/blaze",
"path": "blaze/server/client.py",
"copies": "1",
"size": "3171",
"license": "bsd-3-clause",
"hash": -2218577047432556000,
"line_mean": 24.368,
"line_max": 72,
"alpha_frac": 0.628508357,
"autogenerated": false,
"ratio": 3.8765281173594133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5005036474359413,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import requests
import appr
from appr.commands.command_base import CommandBase
class VersionCmd(CommandBase):
name = 'version'
help_message = "show versions"
def __init__(self, options):
super(VersionCmd, self).__init__(options)
self.api_version = None
self.client_version = None
self.registry_host = options.registry_host
self.ssl_verify = options.cacert or not options.insecure
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_arg(parser)
def _api_version(self):
api_version = None
try:
client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)
api_version = client.version()
except requests.exceptions.RequestException:
api_version = ".. Connection error"
return api_version
def _cli_version(self):
return appr.__version__
def _version(self):
return {'api-version': self._api_version(), "client-version": self._cli_version()}
def _call(self):
version = self._version()
self.api_version = version['api-version']
self.client_version = version['client-version']
def _render_dict(self):
return {"api-version": self.api_version, "client-version": self.client_version}
def _render_console(self):
return "\n".join([
"Api-version: %s" % self.api_version,
"Client-version: %s" % self.client_version])
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/version.py",
"copies": "2",
"size": "1555",
"license": "apache-2.0",
"hash": -2078140856498269000,
"line_mean": 30.1,
"line_max": 93,
"alpha_frac": 0.6302250804,
"autogenerated": false,
"ratio": 3.9567430025445294,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5586968082944529,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sas7bdat
from sas7bdat import SAS7BDAT
import datashape
from datashape import discover, dshape, var, Record, date_, datetime_
from collections import Iterator
import pandas as pd
from .pandas import coerce_datetimes
from ..append import append
from ..convert import convert
from ..resource import resource
@resource.register('.+\.(sas7bdat)')
def resource_sas(uri, **kwargs):
return SAS7BDAT(uri, **kwargs)
@discover.register(SAS7BDAT)
def discover_sas(f, **kwargs):
lines = f.readlines()
next(lines) # burn header
ln = next(lines)
types = map(discover, ln)
names = [col.name.decode("utf-8") for col in f.header.parent.columns]
return var * Record(list(zip(names, types)))
@convert.register(pd.DataFrame, SAS7BDAT, cost=4.0)
def sas_to_DataFrame(s, dshape=None, **kwargs):
df = s.to_data_frame()
if any(typ in (date_, datetime_) for typ in dshape.measure.types):
df = coerce_datetimes(df)
names = [col.decode('utf-8') for col in s.column_names]
df = df[names] # Reorder names to match sasfile
return df
@convert.register(Iterator, SAS7BDAT, cost=1.0)
def sas_to_iterator(s, **kwargs):
lines = s.readlines()
if not s.skip_header:
next(lines) # burn
return lines
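# A minimal usage sketch (not part of the original module; the path below is
# hypothetical and reading it requires the sas7bdat package):
def _example_sas_resource():
    s = resource('/tmp/example.sas7bdat')  # dispatched by the regex above
    return discover(s)                     # var * { <column>: <type>, ... }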
| {
"repo_name": "cowlicks/odo",
"path": "odo/backends/sas.py",
"copies": "10",
"size": "1321",
"license": "bsd-3-clause",
"hash": -1694448508824651300,
"line_mean": 27.7173913043,
"line_max": 73,
"alpha_frac": 0.6956850871,
"autogenerated": false,
"ratio": 3.2377450980392157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0013121817469643558,
"num_lines": 46
} |
from __future__ import absolute_import, division, print_function
import shutil
import sys
from os.path import join as pjoin
from tempfile import mkdtemp
import numpy as np
import six
from numpy import memmap, float32, array
from .common import Benchmark, get_squares_, get_indexes_, get_indexes_rand_
class Indexing(Benchmark):
params = [["indexes_", "indexes_rand_"],
['I', ':,I', 'np.ix_(I, I)'],
['', '=1']]
param_names = ['indexes', 'sel', 'op']
def setup(self, indexes, sel, op):
sel = sel.replace('I', indexes)
ns = {'squares_': get_squares_(),
'np': np,
'indexes_': get_indexes_(),
'indexes_rand_': get_indexes_rand_()}
if sys.version_info[0] >= 3:
code = "def run():\n for a in squares_.values(): a[%s]%s"
else:
code = "def run():\n for a in squares_.itervalues(): a[%s]%s"
code = code % (sel, op)
six.exec_(code, ns)
self.func = ns['run']
def time_op(self, indexes, sel, op):
self.func()
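    # For example, with indexes='indexes_', sel=':,I', op='=1' the generated
    # benchmark body (Python 3 branch) is:
    #
    #     def run():
    #         for a in squares_.values(): a[:,indexes_]=1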
class IndexingSeparate(Benchmark):
def setup(self):
self.tmp_dir = mkdtemp()
self.fp = memmap(pjoin(self.tmp_dir, 'tmp.dat'),
dtype=float32, mode='w+', shape=(50, 60))
self.indexes = array([3, 4, 6, 10, 20])
def teardown(self):
del self.fp
shutil.rmtree(self.tmp_dir)
def time_mmap_slicing(self):
for i in range(1000):
self.fp[5:10]
def time_mmap_fancy_indexing(self):
for i in range(1000):
self.fp[self.indexes]
class IndexingStructured0D(Benchmark):
def setup(self):
self.dt = np.dtype([('a', 'f4', 256)])
self.A = np.zeros((), self.dt)
self.B = self.A.copy()
self.a = np.zeros(1, self.dt)[0]
self.b = self.a.copy()
def time_array_slice(self):
self.B['a'][:] = self.A['a']
def time_array_all(self):
self.B['a'] = self.A['a']
def time_scalar_slice(self):
self.b['a'][:] = self.a['a']
def time_scalar_all(self):
self.b['a'] = self.a['a']
| {
"repo_name": "DailyActie/Surrogate-Model",
"path": "01-codes/numpy-master/benchmarks/benchmarks/bench_indexing.py",
"copies": "1",
"size": "2151",
"license": "mit",
"hash": -8066889706613064000,
"line_mean": 25.2317073171,
"line_max": 76,
"alpha_frac": 0.5323105532,
"autogenerated": false,
"ratio": 3.278963414634146,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4311273967834146,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import signal
from contextlib import contextmanager
class TimeoutError(Exception):
pass
@contextmanager
def timeout(seconds):
def signal_handler(signum, frame):
raise TimeoutError("Timed out!")
if seconds > 0:
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
else: # infinite timeout
yield
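# A minimal usage sketch (not part of the original module). The timeout is
# SIGALRM-based, so it only works in the main thread on Unix:
def _example_timeout():
    try:
        with timeout(1):
            pass  # work that must finish within one second
    except TimeoutError:
        pass  # handle the timed-out work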
class RemoteException(Exception):
""" Remote Exception
Contains the exception and traceback from a remotely run task
- Include the original error message
- Respond to try-except blocks with original error type
- Include remote traceback
"""
def __init__(self, exception, traceback):
self.exception = exception
self.traceback = traceback
def __str__(self):
return (str(self.exception) + "\n\n"
"Traceback\n"
"---------\n" +
self.traceback)
def __dir__(self):
return sorted(set(dir(type(self)) +
list(self.__dict__) +
dir(self.exception)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
return getattr(self.exception, key)
exceptions = dict()
def remote_exception(exc, tb):
""" Metaclass that wraps exception type in RemoteException """
if type(exc) in exceptions:
typ = exceptions[type(exc)]
return typ(exc, tb)
else:
try:
typ = type(exc.__class__.__name__,
(RemoteException, type(exc)),
{'exception_type': type(exc)})
exceptions[type(exc)] = typ
return typ(exc, tb)
except TypeError:
return exc
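# A minimal sketch of the wrapper above (not part of the original module):
# the result is catchable both as the original type and as RemoteException.
def _example_remote_exception():
    import traceback
    try:
        raise ValueError('boom')
    except ValueError as e:
        exc = remote_exception(e, traceback.format_exc())
    assert isinstance(exc, ValueError) and isinstance(exc, RemoteException)
    return exc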
| {
"repo_name": "lensacom/satyr",
"path": "mentor/utils.py",
"copies": "1",
"size": "1915",
"license": "apache-2.0",
"hash": -2379503526841242000,
"line_mean": 24.8783783784,
"line_max": 66,
"alpha_frac": 0.5644908616,
"autogenerated": false,
"ratio": 4.63680387409201,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.570129473569201,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import six
from represent import autorepr
__all__ = (
'PickleConfig',
'JSONConfig',
'MessagePackConfig',
)
class BackendConfig(object):
"""Base class for :py:class:`~bucketcache.backends.Backend` configuration
classes.
"""
def asdict(self):
return {k: v for k, v in six.iteritems(self.__dict__)
if not k.startswith('_')}
@autorepr
class PickleConfig(BackendConfig):
"""Configuration class for :py:class:`~bucketcache.backends.PickleBackend`
Parameters reflect those given to :py:func:`pickle.dump` and
:py:func:`pickle.load`.
.. note::
On Python 2, the following parameters are ignored:
- `fix_imports`
- `encoding`
- `errors`
"""
def __init__(self, protocol=2, fix_imports=True,
encoding='ASCII', errors='strict'):
self.protocol = protocol
self.fix_imports = fix_imports
self.encoding = encoding
self.errors = errors
super(PickleConfig, self).__init__()
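# A minimal sketch (not part of the original module): every config object
# exposes its keyword arguments via asdict(), ready to be passed on to the
# underlying serializer.
def _example_pickle_config():
    cfg = PickleConfig(protocol=2)
    assert cfg.asdict()['protocol'] == 2
    return cfg.asdict()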
@autorepr
class JSONConfig(BackendConfig):
"""Configuration class for :py:class:`~bucketcache.backends.JSONBackend`
Parameters reflect those given to :py:func:`json.dump` and
:py:func:`json.load`.
.. note::
:py:func:`json.dump` and :py:func:`json.load` both have parameters
called `cls`, so they are named `dump_cls` and `load_cls`.
"""
def __init__(self, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, dump_cls=None, indent=None, separators=None,
default=None, sort_keys=False, load_cls=None,
object_hook=None, parse_float=None, parse_int=None,
parse_constant=None, object_pairs_hook=None):
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.dump_cls = dump_cls
self.indent = indent
self.separators = separators
self.default = default
self.sort_keys = sort_keys
self.load_cls = load_cls
self.object_hook = object_hook
self.parse_float = parse_float
self.parse_int = parse_int
self.parse_constant = parse_constant
self.object_pairs_hook = object_pairs_hook
super(JSONConfig, self).__init__()
@autorepr
class MessagePackConfig(BackendConfig):
"""Configuration class for :py:class:`~bucketcache.backends.MessagePackBackend`
Parameters reflect those given to :py:func:`msgpack.pack` and
:py:func:`msgpack.unpack`.
.. note::
:py:func:`msgpack.pack` and :py:func:`msgpack.unpack` both have
parameters called `encoding`, so they are named `pack_encoding` and
`unpack_encoding`.
"""
def __init__(self, default=None, pack_encoding='utf-8',
unicode_errors='strict', use_single_float=False, autoreset=1,
use_bin_type=1, object_hook=None, list_hook=None, use_list=1,
unpack_encoding='utf-8', object_pairs_hook=None):
self.default = default
self.pack_encoding = pack_encoding
self.unicode_errors = unicode_errors
self.use_single_float = use_single_float
self.autoreset = autoreset
self.use_bin_type = use_bin_type
self.object_hook = object_hook
self.list_hook = list_hook
self.use_list = use_list
self.unpack_encoding = unpack_encoding
self.object_pairs_hook = object_pairs_hook
super(MessagePackConfig, self).__init__()
| {
"repo_name": "RazerM/bucketcache",
"path": "bucketcache/config.py",
"copies": "1",
"size": "3652",
"license": "mit",
"hash": 4514281038593082000,
"line_mean": 30.7565217391,
"line_max": 83,
"alpha_frac": 0.6234939759,
"autogenerated": false,
"ratio": 3.8645502645502647,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4988044240450265,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import six
import mesos_pb2
import operator
from uuid import uuid4
from functools import partial
from proxo import MessageProxy
if six.PY3:
def cmp(a, b):
return (a > b) - (a < b)
    # mixin class for Python 3 that provides the rich comparisons in terms of __cmp__
class Comparable(object):
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return self.__cmp__(other) != 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __le__(self, other):
return self.__cmp__(other) <= 0
else:
class Comparable(object):
pass
class Environment(MessageProxy):
proto = mesos_pb2.Environment
class Scalar(MessageProxy):
proto = mesos_pb2.Value.Scalar
class Resource(MessageProxy):
proto = mesos_pb2.Resource
class ScalarResource(Comparable, Resource):
# supports comparison and basic arithmetics with scalars
proto = mesos_pb2.Resource(type=mesos_pb2.Value.SCALAR)
def __init__(self, value=None, **kwargs):
super(Resource, self).__init__(**kwargs)
if value is not None:
self.scalar = Scalar(value=value)
def __cmp__(self, other):
first, second = float(self), float(other)
if first < second:
return -1
elif first > second:
return 1
else:
return 0
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__, self.scalar.value)
def __float__(self):
return float(self.scalar.value)
@classmethod
def _op(cls, op, first, second):
value = op(float(first), float(second))
return cls(value=value)
def __add__(self, other):
return self._op(operator.add, self, other)
def __radd__(self, other):
return self._op(operator.add, other, self)
def __sub__(self, other):
return self._op(operator.sub, self, other)
def __rsub__(self, other):
return self._op(operator.sub, other, self)
def __mul__(self, other):
return self._op(operator.mul, self, other)
def __rmul__(self, other):
return self._op(operator.mul, other, self)
def __truediv__(self, other):
return self._op(operator.truediv, self, other)
def __rtruediv__(self, other):
return self._op(operator.truediv, other, self)
def __iadd__(self, other):
self.scalar.value = float(self._op(operator.add, self, other))
return self
def __isub__(self, other):
self.scalar.value = float(self._op(operator.sub, self, other))
return self
class Cpus(ScalarResource):
proto = mesos_pb2.Resource(name='cpus', type=mesos_pb2.Value.SCALAR)
class Mem(ScalarResource):
proto = mesos_pb2.Resource(name='mem', type=mesos_pb2.Value.SCALAR)
class Disk(ScalarResource):
proto = mesos_pb2.Resource(name='disk', type=mesos_pb2.Value.SCALAR)
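# Minimal sketch (illustrative only), assuming mesos_pb2 and proxo are
# importable as above: scalar resources support float conversion, comparison
# and arithmetic through Comparable/__cmp__ and ScalarResource._op.
def _example_scalar_resources():  # hypothetical helper for illustration
    total = Cpus(1.5) + Cpus(0.5)   # a new Cpus with scalar.value == 2.0
    assert float(total) == 2.0
    assert Mem(128) < Mem(256)      # compares the underlying float values
    return total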
class ResourcesMixin(Comparable):
@classmethod
def _cast_zero(cls, other=0):
if other == 0:
return cls(resources=[Cpus(0), Mem(0), Disk(0)])
else:
return other
@property
def cpus(self):
for res in self.resources:
if isinstance(res, Cpus):
return res
return Cpus(0.0)
@property
def mem(self):
for res in self.resources:
if isinstance(res, Mem):
return res
return Mem(0.0)
@property
def disk(self):
for res in self.resources:
if isinstance(res, Disk):
return res
return Disk(0.0)
def __repr__(self):
return '<{}: {}>'.format(self.__class__.__name__,
', '.join(map(str, self.resources)))
def __cmp__(self, other):
other = self._cast_zero(other)
if all([self.cpus < other.cpus,
self.mem < other.mem,
self.disk < other.disk]):
            # all resources are smaller, so the task will fit into the offer
return -1
elif any([self.cpus > other.cpus,
self.mem > other.mem,
self.disk > other.disk]):
            # any resource is bigger, so the task won't fit into the offer
return 1
else:
return 0
def __radd__(self, other): # to support sum()
other = self._cast_zero(other)
return self + other
def __add__(self, other):
other = self._cast_zero(other)
# ports = list(set(self.ports) | set(other.ports))
cpus = self.cpus + other.cpus
mem = self.mem + other.mem
disk = self.disk + other.disk
mixin = self.__class__()
mixin.resources = [cpus, disk, mem]
return mixin
def __sub__(self, other):
other = self._cast_zero(other)
# ports = list(set(self.ports) | set(other.ports))
cpus = self.cpus - other.cpus
mem = self.mem - other.mem
disk = self.disk - other.disk
mixin = self.__class__()
mixin.resources = [cpus, disk, mem]
return mixin
def __iadd__(self, other):
other = self._cast_zero(other)
added = self + other
self.resources = added.resources
return self
def __isub__(self, other):
other = self._cast_zero(other)
subbed = self - other
self.resources = subbed.resources
return self
class FrameworkID(MessageProxy):
proto = mesos_pb2.FrameworkID
class SlaveID(MessageProxy):
proto = mesos_pb2.SlaveID
class ExecutorID(MessageProxy):
proto = mesos_pb2.ExecutorID
class OfferID(MessageProxy):
proto = mesos_pb2.OfferID
class TaskID(MessageProxy):
proto = mesos_pb2.TaskID
class FrameworkInfo(MessageProxy):
proto = mesos_pb2.FrameworkInfo
class ExecutorInfo(MessageProxy):
proto = mesos_pb2.ExecutorInfo
def __init__(self, id=None, **kwargs):
super(ExecutorInfo, self).__init__(**kwargs)
self.id = id or str(uuid4())
@property
def id(self): # more consistent naming
return self['executor_id']
@id.setter
def id(self, value):
if not isinstance(value, ExecutorID):
value = ExecutorID(value=value)
self['executor_id'] = value
class MasterInfo(MessageProxy):
proto = mesos_pb2.MasterInfo
class SlaveInfo(MessageProxy):
proto = mesos_pb2.SlaveInfo
class Filters(MessageProxy):
proto = mesos_pb2.Filters
class TaskStatus(MessageProxy):
proto = mesos_pb2.TaskStatus
@property
def task_id(self): # more consistent naming
return self['task_id']
@task_id.setter
def task_id(self, value):
if not isinstance(value, TaskID):
value = TaskID(value=value)
self['task_id'] = value
def is_staging(self):
return self.state == 'TASK_STAGING'
def is_starting(self):
return self.state == 'TASK_STARTING'
def is_running(self):
return self.state == 'TASK_RUNNING'
def has_finished(self):
return self.state == 'TASK_FINISHED'
def has_succeeded(self):
return self.state == 'TASK_FINISHED'
def has_killed(self):
return self.state == 'TASK_KILLED'
def has_failed(self):
return self.state in ['TASK_FAILED', 'TASK_LOST', 'TASK_KILLED',
'TASK_ERROR']
def has_terminated(self):
return self.has_succeeded() or self.has_failed()
class Offer(ResourcesMixin, MessageProxy): # important order!
proto = mesos_pb2.Offer
class TaskInfo(ResourcesMixin, MessageProxy):
proto = mesos_pb2.TaskInfo
def __init__(self, id=None, **kwargs):
super(TaskInfo, self).__init__(**kwargs)
self.id = id or str(uuid4())
self.status = TaskStatus(task_id=self.id, state='TASK_STAGING')
@property
def id(self): # more consistent naming
return self['task_id']
@id.setter
def id(self, value):
if not isinstance(value, TaskID):
value = TaskID(value=value)
self['task_id'] = value
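# Minimal sketch (illustrative only), assuming the protos accept the fields
# used below: ResourcesMixin.__cmp__ lets a task be compared against an offer
# to decide whether it fits.
def _example_task_fits_offer():  # hypothetical helper for illustration
    offer = Offer(resources=[Cpus(1), Mem(1024), Disk(1000)])
    task = TaskInfo(name='example', resources=[Cpus(0.1), Mem(64), Disk(10)])
    return task < offer  # True: every task resource is smaller than the offer's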
class CommandInfo(MessageProxy):
proto = mesos_pb2.CommandInfo
class ContainerInfo(MessageProxy):
proto = mesos_pb2.ContainerInfo
class DockerInfo(MessageProxy):
proto = mesos_pb2.ContainerInfo.DockerInfo
class Request(MessageProxy):
proto = mesos_pb2.Request
class Operation(MessageProxy):
proto = mesos_pb2.Offer.Operation
| {
"repo_name": "kszucs/proxo",
"path": "proxo/tests/mesos.py",
"copies": "1",
"size": "8643",
"license": "apache-2.0",
"hash": -8698413069961049000,
"line_mean": 23.9077809798,
"line_max": 76,
"alpha_frac": 0.5882216823,
"autogenerated": false,
"ratio": 3.720619888075764,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4808841570375764,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import six
import numpy as np
import copy
import matplotlib.cm as mcm  # used by show_label_array below
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.ticker import NullLocator
def mark_region(ax, low, high, vline_style, span_style):
"""
Mark a region of a graph with vertical lines and an axvspan
Parameters
----------
ax : Axes
        The `Axes` object to add the artist to
    low, high : float
        The low and high threshold values; a vertical line is drawn at each
        and an axvspan is drawn over the region between them
    vline_style : dict
        The style to use for the vertical lines
    span_style : dict
        Style for the axvspan behind the central region
Returns
-------
vline_low, vline_hi : Line2D
Vertical lines at the thresholds
hspan : Patch
Patch over middle region
"""
# add vertical lines
vline_low = ax.axvline(low, **vline_style)
vline_high = ax.axvline(high, **vline_style)
hspan = ax.axvspan(low, high, **span_style)
return vline_low, vline_high, hspan
def split_plot(ax, x, y, low, high, inner_style, outer_style):
"""
Split styling of line based on the x-value
Parameters
----------
ax : Axes
        The `Axes` object to add the artist to
x, y : ndarray
Data, must be same length
low, high : float
The low and high threshold values, points for `low < x < high` are
styled using `inner_style` and points for `x < low or x > high` are
styled using `outer_style`
inner_style, outer_style : dict
Dictionary of styles that can be passed to `ax.plot`
Returns
-------
lower, mid, upper : Line2D
        The artists for the lower, middle, and upper ranges
"""
low_mask = x < low
high_mask = x > high
mid_mask = ~np.logical_or(low_mask, high_mask)
low_mask[1:] |= low_mask[:-1]
high_mask[:-1] |= high_mask[1:]
lower, = ax.plot(x[low_mask], y[low_mask], **outer_style)
mid, = ax.plot(x[mid_mask], y[mid_mask], **inner_style)
upper, = ax.plot(x[high_mask], y[high_mask], **outer_style)
return lower, mid, upper
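# Minimal usage sketch (illustrative only), assuming matplotlib is installed;
# wrapped in a function so that importing this module stays side-effect free.
def _example_split_plot():  # hypothetical helper for illustration
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    x = np.linspace(0, 10, 200)
    split_plot(ax, x, np.sin(x), low=2, high=8,
               inner_style={'color': 'black'},
               outer_style={'color': 'red', 'linestyle': '--'})
    return fig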
def show_label_array(ax, label_array, cmap=None, **kwargs):
"""
Display a labeled array nicely
Additional kwargs are passed through to `ax.imshow`.
    If `vmin` is in kwargs, it is clipped to a minimum of 0.5.
Parameters
----------
ax : Axes
        The `Axes` object to add the artist to
label_array : ndarray
Expected to be an unsigned integer array. 0 is background,
        positive integers label regions of interest
cmap : str or colormap, optional
Color map to use, defaults to 'Paired'
Returns
-------
img : AxesImage
The artist added to the axes
"""
if cmap is None:
cmap = 'Paired'
_cmap = copy.copy((mcm.get_cmap(cmap)))
_cmap.set_under('w', 0)
vmin = max(.5, kwargs.pop('vmin', .5))
ax.set_aspect('equal')
im = ax.imshow(label_array, cmap=_cmap,
interpolation='nearest',
vmin=vmin,
**kwargs)
return im
def binary_state_lines(ax, data, xmin, xmax,
delta_y=3,
off_color=None,
on_color=None,
lc_kwargs=None):
"""
Draw series of lines indicating the state of (many) indicators.
Parameters
----------
ax : Axes
The axes to draw stuff to
data : OrderedDict
        The data as an ordered dict, keyed on the data label. The keys will
        be used as ytick labels. The values are a list of edge pairs where
the value is 'high'; ex ``data[k] = [(1, 2), (3, 5.5)]`` is 'high' in
the ranges 1 to 2 and 3 to 5.5 and 'low' everywhere else.
The lines are drawn in order from the top down.
xmin, xmax : float
The minimum and maximum limits for the x values
delta_y : float, optional
The spacing between lines
off_color, on_color : color, optional
        The colors to use for the off/on state.
Default to "#1C2F4D" (blueish) and "#FA9B00" (yellowish) respectively
lc_kwargs : dict, optional
        kwargs to pass through to the LineCollection init method. If not
given defaults to ``{'lw': 10}``
Returns
-------
ret : dict
dictionary of the collections added keyed on the label
"""
if lc_kwargs is None:
lc_kwargs = dict()
if 'lw' not in lc_kwargs:
lc_kwargs['lw'] = 10
if off_color is None:
off_color = "#1C2F4D"
if on_color is None:
on_color = "#FA9B00"
# base offset
y_val = 0
# make the color map and norm
cmap = ListedColormap([off_color, on_color])
norm = BoundaryNorm([0, 0.5, 1], cmap.N)
# dictionary to hold the returned artists
ret = dict()
# loop over the input data draw each collection
for label, d in data.items():
# increment the y offset
y_val += delta_y
        # turn the high windows into alternating
# high/low regions
x = np.asarray(d).ravel()
# assign the high/low state to each one
state = np.mod(1 + np.arange(len(x)), 2)
        # deal with boundary conditions so the state is off
# at start/end
if x[0] > xmin:
x = np.r_[xmin, x]
state = np.r_[0, state]
if x[-1] < xmax:
x = np.r_[x, xmax]
state = np.r_[state, 0]
# make the matching y values
y = np.ones(len(x)) * y_val
# call helper function to create the collection
coll = _draw_segments(ax, x, y, state,
cmap, norm, lc_kwargs)
ret[label] = coll
# set up the axes limits
ax.set_xlim(xmin, xmax)
ax.set_ylim(0, y_val + delta_y)
# turn off x-ticks
ax.xaxis.set_major_locator(NullLocator())
# make the y-ticks be labeled as per the input
ax.yaxis.set_ticks((1 + np.arange(len(data))) * delta_y)
ax.yaxis.set_ticklabels(list(data.keys()))
# invert so that the first data is at the top
ax.invert_yaxis()
# turn off the frame and patch
ax.set_frame_on(False)
# return the added artists
return ret
def _draw_segments(ax, x, y, state, cmap, norm, lc_kwargs):
"""
    helper function to turn boundary edges into the input that
    LineCollection expects.
Parameters
----------
ax : Axes
The axes to draw to
x, y, state : array
The x edges, the y values and the state of each region
cmap : matplotlib.colors.Colormap
The color map to use
    norm : matplotlib.colors.Normalize
The norm to use with the color map
lc_kwargs : dict
kwargs to pass through to LineCollection
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
lc = LineCollection(segments, cmap=cmap, norm=norm, **lc_kwargs)
lc.set_array(state)
ax.add_collection(lc)
return lc
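# Minimal usage sketch (illustrative only), assuming matplotlib is installed:
# two indicators over the window [0, 10], each 'high' in the listed
# (start, stop) pairs, as described in the binary_state_lines docstring.
def _example_binary_state_lines():  # hypothetical helper for illustration
    from collections import OrderedDict
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    data = OrderedDict([('shutter', [(1, 2), (4, 7)]),
                        ('beam', [(0.5, 9.5)])])
    return binary_state_lines(ax, data, xmin=0, xmax=10)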
| {
"repo_name": "sameera2004/xray-vision",
"path": "xray_vision/mpl_plotting/misc.py",
"copies": "3",
"size": "7210",
"license": "bsd-3-clause",
"hash": -4025850691927045600,
"line_mean": 26.8378378378,
"line_max": 77,
"alpha_frac": 0.5911234397,
"autogenerated": false,
"ratio": 3.689866939611054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5780990379311054,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import six
import numpy as np
import matplotlib.cm as mcm
import copy
def mark_region(ax, low, high, vline_style, span_style):
"""
Mark a region of a graph with vertical lines and an axvspan
Parameters
----------
ax : Axes
        The `Axes` object to add the artist to
    low, high : float
        The low and high threshold values; a vertical line is drawn at each
        and an axvspan is drawn over the region between them
    vline_style : dict
        The style to use for the vertical lines
    span_style : dict
        Style for the axvspan behind the central region
Returns
-------
vline_low, vline_hi : Line2D
Vertical lines at the thresholds
hspan : Patch
Patch over middle region
"""
# add vertical lines
vline_low = ax.axvline(low, **vline_style)
vline_high = ax.axvline(high, **vline_style)
hspan = ax.axvspan(low, high, **span_style)
return vline_low, vline_high, hspan
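# Minimal usage sketch (illustrative only), assuming matplotlib is installed;
# wrapped in a function so that importing this module stays side-effect free.
def _example_mark_region():  # hypothetical helper for illustration
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    mark_region(ax, low=2, high=8,
                vline_style={'color': 'black', 'linestyle': ':'},
                span_style={'facecolor': 'gray', 'alpha': 0.3})
    return fig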
def split_plot(ax, x, y, low, high, inner_style, outer_style):
"""
Split styling of line based on the x-value
Parameters
----------
ax : Axes
The `Axes` object to add the artist too
x, y : ndarray
Data, must be same length
low, high : float
The low and high threshold values, points for `low < x < high` are
styled using `inner_style` and points for `x < low or x > high` are
styled using `outer_style`
inner_style, outer_style : dict
Dictionary of styles that can be passed to `ax.plot`
Returns
-------
lower, mid, upper : Line2D
        The artists for the lower, middle, and upper ranges
"""
low_mask = x < low
high_mask = x > high
mid_mask = ~np.logical_or(low_mask, high_mask)
low_mask[1:] |= low_mask[:-1]
high_mask[:-1] |= high_mask[1:]
lower, = ax.plot(x[low_mask], y[low_mask], **outer_style)
mid, = ax.plot(x[mid_mask], y[mid_mask], **inner_style)
upper, = ax.plot(x[high_mask], y[high_mask], **outer_style)
return lower, mid, upper
| {
"repo_name": "ericdill/xray-vision",
"path": "xray_vision/mpl_plotting/misc.py",
"copies": "1",
"size": "2193",
"license": "bsd-3-clause",
"hash": 2460200398316804600,
"line_mean": 24.5,
"line_max": 75,
"alpha_frac": 0.6096671227,
"autogenerated": false,
"ratio": 3.480952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4590619503652381,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import six
from collections import defaultdict
from datetime import datetime, timedelta
from django.db.models import Q
from django.utils import timezone
from six.moves import reduce
from sentry.constants import STATUS_CHOICES
from sentry.models import EventUser, Release, User
from sentry.search.base import ANY, EMPTY
from sentry.utils.auth import find_users
class InvalidQuery(Exception):
pass
def parse_release(project, value):
# TODO(dcramer): add environment support
if value == 'latest':
value = Release.objects.extra(select={
'sort': 'COALESCE(date_released, date_added)',
}).order_by('-sort').values_list('version', flat=True).first()
if value is None:
return EMPTY
return value
def get_user_tag(project, key, value):
# TODO(dcramer): do something with case of multiple matches
try:
lookup = EventUser.attr_from_keyword(key)
euser = EventUser.objects.filter(
project=project,
**{lookup: value}
)[0]
except (KeyError, IndexError):
return '{}:{}'.format(key, value)
return euser.tag_value
def parse_datetime_range(value):
try:
flag, count, interval = value[0], int(value[1:-1]), value[-1]
except (ValueError, TypeError):
raise InvalidQuery('{} is not a valid datetime query'.format(value))
if flag not in ('+', '-'):
raise InvalidQuery('{} is not a valid datetime query'.format(value))
if interval == 'h':
delta = timedelta(hours=count)
elif interval == 'w':
delta = timedelta(days=count * 7)
elif interval == 'd':
delta = timedelta(days=count)
elif interval == 'm':
delta = timedelta(minutes=count)
else:
raise InvalidQuery('{} is not a valid datetime query'.format(value))
if flag == '-':
return (timezone.now() - delta, None)
else:
return (None, timezone.now() - delta)
def parse_datetime_comparison(value):
    # TODO(dcramer): currently inclusivity is not controllable by the query
    # as the from date is always inclusive, and the to date is always exclusive
if value[:2] in ('>=', '=>'):
return (parse_datetime_value(value[2:])[0], None)
if value[:2] in ('<=', '=<'):
return (None, parse_datetime_value(value[2:])[0])
if value[:1] in ('>'):
return (parse_datetime_value(value[1:])[0], None)
if value[:1] in ('<'):
return (None, parse_datetime_value(value[1:])[0])
if value[0] == '=':
return parse_datetime_value(value[1:])
raise InvalidQuery('{} is not a valid datetime query'.format(value))
def parse_datetime_value(value):
try:
return _parse_datetime_value(value)
except ValueError:
raise InvalidQuery('{} is not a valid datetime query'.format(value))
def _parse_datetime_value(value):
# timezones are not supported and are assumed UTC
if value[-1] == 'Z':
value = value[:-1]
value_len = len(value)
if value_len in (8, 10):
value = datetime.strptime(value, '%Y-%m-%d').replace(
tzinfo=timezone.utc,
)
return [value, value + timedelta(days=1)]
elif value[4] == '-':
try:
value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S').replace(
tzinfo=timezone.utc,
)
except ValueError:
value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f').replace(
tzinfo=timezone.utc,
)
else:
value = datetime.utcfromtimestamp(float(value)).replace(
tzinfo=timezone.utc,
)
return [value - timedelta(minutes=5), value + timedelta(minutes=6)]
def parse_datetime_expression(value):
# result must be (from inclusive, to exclusive)
if value.startswith(('-', '+')):
return parse_datetime_range(value)
if value.startswith(('>', '<', '=', '<=', '>=')):
return parse_datetime_comparison(value)
return parse_datetime_value(value)
def get_date_params(value, from_field, to_field):
date_from, date_to = parse_datetime_expression(value)
result = {}
if date_from:
result.update({
from_field: date_from,
'{}_inclusive'.format(from_field): True,
})
if date_to:
result.update({
to_field: date_to,
'{}_inclusive'.format(to_field): False,
})
return result
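# Minimal sketch (illustrative only) of the datetime grammar handled above:
# a leading '-' means "since <delta> ago", so only the *_from side of the
# window is populated (and marked inclusive).
def _example_age_window():  # hypothetical helper for illustration
    return get_date_params('-24h', 'age_from', 'age_to')
    # -> {'age_from': <now - 24 hours>, 'age_from_inclusive': True}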
def tokenize_query(query):
"""
Tokenizes a standard Sentry search query.
>>> query = 'is:resolved foo bar tag:value'
>>> tokenize_query(query)
{
'is': ['resolved'],
'query': ['foo', 'bar'],
'tag': ['value'],
}
"""
results = defaultdict(list)
tokens = query.split(' ')
tokens_iter = iter(tokens)
for token in tokens_iter:
# ignore empty tokens
if not token:
continue
if ':' not in token:
results['query'].append(token)
continue
key, value = token.split(':', 1)
if not value:
results['query'].append(token)
continue
if value[0] == '"':
nvalue = value
while not nvalue.endswith('"'):
try:
nvalue = six.next(tokens_iter)
except StopIteration:
break
value = '%s %s' % (value, nvalue)
if value.endswith('"'):
value = value[1:-1]
else:
value = value[1:]
results[key].append(value)
return dict(results)
def parse_query(project, query, user):
# TODO(dcramer): handle query being wrapped in quotes
tokens = tokenize_query(query)
results = {'tags': {}, 'query': []}
for key, token_list in six.iteritems(tokens):
for value in token_list:
if key == 'query':
results['query'].append(value)
elif key == 'is':
if value == 'unassigned':
results['unassigned'] = True
elif value == 'assigned':
results['unassigned'] = False
else:
try:
results['status'] = STATUS_CHOICES[value]
except KeyError:
pass
elif key == 'assigned':
if value == 'me':
results['assigned_to'] = user
else:
try:
results['assigned_to'] = find_users(value)[0]
except IndexError:
# XXX(dcramer): hacky way to avoid showing any results when
# an invalid user is entered
results['assigned_to'] = User(id=0)
elif key == 'bookmarks':
if value == 'me':
results['bookmarked_by'] = user
else:
try:
results['bookmarked_by'] = find_users(value)[0]
except IndexError:
# XXX(dcramer): hacky way to avoid showing any results when
# an invalid user is entered
results['bookmarked_by'] = User(id=0)
elif key == 'first-release':
results['first_release'] = parse_release(project, value)
elif key == 'release':
results['tags']['sentry:release'] = parse_release(project, value)
elif key == 'user':
if ':' in value:
comp, value = value.split(':', 1)
else:
comp = 'id'
results['tags']['sentry:user'] = get_user_tag(
project, comp, value)
elif key == 'has':
if value == 'user':
value = 'sentry:user'
elif value == 'release':
value = 'sentry:release'
results['tags'][value] = ANY
elif key == 'age':
results.update(get_date_params(value, 'age_from', 'age_to'))
elif key.startswith('user.'):
results['tags']['sentry:user'] = get_user_tag(
project, key.split('.', 1)[1], value)
elif key == 'event.timestamp':
results.update(get_date_params(value, 'date_from', 'date_to'))
else:
results['tags'][key] = value
results['query'] = ' '.join(results['query'])
return results
def in_iexact(column, values):
from operator import or_
query = '{}__iexact'.format(column)
return reduce(or_, [Q(**{query: v}) for v in values])
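# Minimal sketch (illustrative only): in_iexact ORs together one
# case-insensitive exact-match filter per value.
def _example_in_iexact():  # hypothetical helper for illustration
    return in_iexact('email', ['a@example.com', 'B@example.com'])
    # equivalent to Q(email__iexact='a@example.com') | Q(email__iexact='B@example.com')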
| {
"repo_name": "fotinakis/sentry",
"path": "src/sentry/search/utils.py",
"copies": "1",
"size": "8769",
"license": "bsd-3-clause",
"hash": 7160843755235624000,
"line_mean": 31.1208791209,
"line_max": 83,
"alpha_frac": 0.5298209602,
"autogenerated": false,
"ratio": 4.230101302460203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001318851493067521,
"num_lines": 273
} |
from __future__ import absolute_import, division, print_function
import six
from collections import defaultdict
from datetime import datetime, timedelta
from django.utils import timezone
from sentry.constants import STATUS_CHOICES
from sentry.models import EventUser, Release, User
from sentry.search.base import ANY, EMPTY
from sentry.utils.auth import find_users
class InvalidQuery(Exception):
pass
def parse_release(project, value):
# TODO(dcramer): add environment support
if value == 'latest':
value = Release.objects.filter(
project=project,
).extra(select={
'sort': 'COALESCE(date_released, date_added)',
}).order_by('-sort').values_list('version', flat=True).first()
if value is None:
return EMPTY
return value
def get_user_tag(project, key, value):
# TODO(dcramer): do something with case of multiple matches
try:
lookup = EventUser.attr_from_keyword(key)
euser = EventUser.objects.filter(
project=project,
**{lookup: value}
)[0]
except (KeyError, IndexError):
return u'{}:{}'.format(key, value)
return euser.tag_value
def parse_datetime_range(value):
try:
flag, count, interval = value[0], int(value[1:-1]), value[-1]
except (ValueError, TypeError):
raise InvalidQuery(u'{} is not a valid datetime query'.format(value))
if flag not in ('+', '-'):
raise InvalidQuery(u'{} is not a valid datetime query'.format(value))
if interval == 'h':
delta = timedelta(hours=count)
elif interval == 'w':
delta = timedelta(days=count * 7)
elif interval == 'd':
delta = timedelta(days=count)
elif interval == 'm':
delta = timedelta(minutes=count)
else:
raise InvalidQuery(u'{} is not a valid datetime query'.format(value))
if flag == '-':
return (timezone.now() - delta, None)
else:
return (None, timezone.now() - delta)
def parse_datetime_comparison(value):
    # TODO(dcramer): currently inclusivity is not controllable by the query
    # as the from date is always inclusive, and the to date is always exclusive
if value[:2] in ('>=', '=>'):
return (parse_datetime_value(value[2:])[0], None)
if value[:2] in ('<=', '=<'):
return (None, parse_datetime_value(value[2:])[0])
if value[:1] in ('>'):
return (parse_datetime_value(value[1:])[0], None)
if value[:1] in ('<'):
return (None, parse_datetime_value(value[1:])[0])
if value[0] == '=':
return parse_datetime_value(value[1:])
raise InvalidQuery(u'{} is not a valid datetime query'.format(value))
def parse_datetime_value(value):
try:
return _parse_datetime_value(value)
except ValueError:
raise InvalidQuery(u'{} is not a valid datetime query'.format(value))
def _parse_datetime_value(value):
# timezones are not supported and are assumed UTC
if value[-1] == 'Z':
value = value[:-1]
value_len = len(value)
if value_len in (8, 10):
value = datetime.strptime(value, '%Y-%m-%d').replace(
tzinfo=timezone.utc,
)
return [value, value + timedelta(days=1)]
elif value[4] == '-':
try:
value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S').replace(
tzinfo=timezone.utc,
)
except ValueError:
value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f').replace(
tzinfo=timezone.utc,
)
else:
value = datetime.utcfromtimestamp(float(value)).replace(
tzinfo=timezone.utc,
)
return [value - timedelta(minutes=5), value + timedelta(minutes=6)]
def parse_datetime_expression(value):
# result must be (from inclusive, to exclusive)
if value.startswith(('-', '+')):
return parse_datetime_range(value)
if value.startswith(('>', '<', '=', '<=', '>=')):
return parse_datetime_comparison(value)
return parse_datetime_value(value)
def get_date_params(value, from_field, to_field):
date_from, date_to = parse_datetime_expression(value)
result = {}
if date_from:
result.update({
from_field: date_from,
'{}_inclusive'.format(from_field): True,
})
if date_to:
result.update({
to_field: date_to,
'{}_inclusive'.format(to_field): False,
})
return result
def tokenize_query(query):
"""
Tokenizes a standard Sentry search query.
>>> query = 'is:resolved foo bar tag:value'
>>> tokenize_query(query)
{
'is': ['resolved'],
'query': ['foo', 'bar'],
'tag': ['value'],
}
"""
results = defaultdict(list)
tokens = query.split(' ')
tokens_iter = iter(tokens)
for token in tokens_iter:
# ignore empty tokens
if not token:
continue
if ':' not in token:
results['query'].append(token)
continue
# this handles quoted string, and is duplicated below
if token[0] == '"':
nvalue = token
while nvalue[-1] != '"':
try:
nvalue = six.next(tokens_iter)
except StopIteration:
break
token = '%s %s' % (token, nvalue)
if token[-1] == '"':
token = token[1:-1]
else:
token = token[1:]
results['query'].append(token)
continue
key, value = token.split(':', 1)
if not value:
results['query'].append(token)
continue
if value[0] == '"':
nvalue = value
while nvalue[-1] != '"':
try:
nvalue = six.next(tokens_iter)
except StopIteration:
break
value = '%s %s' % (value, nvalue)
if value[-1] == '"':
value = value[1:-1]
else:
value = value[1:]
results[key].append(value)
return dict(results)
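# Minimal sketch (illustrative only) of the quoted-value handling above:
# tokens are re-joined until the closing quote and the quotes are stripped.
def _example_tokenize_quoted():  # hypothetical helper for illustration
    return tokenize_query('user:"jane doe" is:unresolved')
    # -> {'user': ['jane doe'], 'is': ['unresolved']}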
def parse_query(project, query, user):
# TODO(dcramer): handle query being wrapped in quotes
tokens = tokenize_query(query)
results = {'tags': {}, 'query': []}
for key, token_list in six.iteritems(tokens):
for value in token_list:
if key == 'query':
results['query'].append(value)
elif key == 'is':
if value == 'unassigned':
results['unassigned'] = True
elif value == 'assigned':
results['unassigned'] = False
else:
try:
results['status'] = STATUS_CHOICES[value]
except KeyError:
pass
elif key == 'assigned':
if value == 'me':
results['assigned_to'] = user
else:
try:
results['assigned_to'] = find_users(value)[0]
except IndexError:
# XXX(dcramer): hacky way to avoid showing any results when
# an invalid user is entered
results['assigned_to'] = User(id=0)
elif key == 'bookmarks':
if value == 'me':
results['bookmarked_by'] = user
else:
try:
results['bookmarked_by'] = find_users(value)[0]
except IndexError:
# XXX(dcramer): hacky way to avoid showing any results when
# an invalid user is entered
results['bookmarked_by'] = User(id=0)
elif key == 'first-release':
results['first_release'] = parse_release(project, value)
elif key == 'release':
results['tags']['sentry:release'] = parse_release(project, value)
elif key == 'user':
if ':' in value:
comp, value = value.split(':', 1)
else:
comp = 'id'
results['tags']['sentry:user'] = get_user_tag(
project, comp, value)
elif key == 'has':
if value == 'user':
value = 'sentry:user'
elif value == 'release':
value = 'sentry:release'
results['tags'][value] = ANY
elif key == 'age':
results.update(get_date_params(value, 'age_from', 'age_to'))
elif key.startswith('user.'):
results['tags']['sentry:user'] = get_user_tag(
project, key.split('.', 1)[1], value)
elif key == 'event.timestamp':
results.update(get_date_params(value, 'date_from', 'date_to'))
else:
results['tags'][key] = value
results['query'] = ' '.join(results['query'])
return results
| {
"repo_name": "alexm92/sentry",
"path": "src/sentry/search/utils.py",
"copies": "1",
"size": "9117",
"license": "bsd-3-clause",
"hash": -5637075817117275000,
"line_mean": 31.329787234,
"line_max": 83,
"alpha_frac": 0.5171657343,
"autogenerated": false,
"ratio": 4.2503496503496505,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012767604879696212,
"num_lines": 282
} |
from __future__ import absolute_import, division, print_function
import six
from datetime import timedelta
from django.db import IntegrityError, transaction
from django.utils import timezone
from rest_framework import serializers
from rest_framework.response import Response
from uuid import uuid4
from sentry.app import search
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializer
from sentry.constants import DEFAULT_SORT_OPTION
from sentry.db.models.query import create_or_update
from sentry.models import (
Activity, EventMapping, Group, GroupHash, GroupBookmark, GroupResolution, GroupSeen,
GroupSubscription, GroupSubscriptionReason, GroupSnooze, GroupStatus,
Release, TagKey,
)
from sentry.models.group import looks_like_short_id
from sentry.search.utils import parse_query
from sentry.tasks.deletion import delete_group
from sentry.tasks.merge import merge_group
from sentry.utils.cursors import Cursor
from sentry.utils.apidocs import scenario, attach_scenarios
ERR_INVALID_STATS_PERIOD = "Invalid stats_period. Valid choices are '', '24h', and '14d'"
@scenario('BulkUpdateIssues')
def bulk_update_issues_scenario(runner):
project = runner.default_project
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='PUT',
path='/projects/%s/%s/issues/?id=%s&id=%s' % (
runner.org.slug, project.slug, group1.id, group2.id),
data={'status': 'unresolved', 'isPublic': False}
)
@scenario('BulkRemoveIssuess')
def bulk_remove_issues_scenario(runner):
with runner.isolated_project('Amazing Plumbing') as project:
group1, group2 = Group.objects.filter(project=project)[:2]
runner.request(
method='DELETE',
path='/projects/%s/%s/issues/?id=%s&id=%s' % (
runner.org.slug, project.slug, group1.id, group2.id),
)
@scenario('ListProjectIssuess')
def list_project_issues_scenario(runner):
project = runner.default_project
runner.request(
method='GET',
path='/projects/%s/%s/issues/?statsPeriod=24h' % (
runner.org.slug, project.slug),
)
STATUS_CHOICES = {
'resolved': GroupStatus.RESOLVED,
'unresolved': GroupStatus.UNRESOLVED,
'muted': GroupStatus.MUTED,
'resolvedInNextRelease': GroupStatus.UNRESOLVED,
}
class ValidationError(Exception):
pass
class GroupSerializer(serializers.Serializer):
status = serializers.ChoiceField(choices=zip(
STATUS_CHOICES.keys(), STATUS_CHOICES.keys()
))
hasSeen = serializers.BooleanField()
isBookmarked = serializers.BooleanField()
isPublic = serializers.BooleanField()
isSubscribed = serializers.BooleanField()
merge = serializers.BooleanField()
snoozeDuration = serializers.IntegerField()
class ProjectGroupIndexEndpoint(ProjectEndpoint):
doc_section = DocSection.EVENTS
permission_classes = (ProjectEventPermission,)
def _build_query_params_from_request(self, request, project):
query_kwargs = {
'project': project,
}
if request.GET.get('status'):
try:
query_kwargs['status'] = STATUS_CHOICES[request.GET['status']]
except KeyError:
raise ValidationError('invalid status')
if request.user.is_authenticated() and request.GET.get('bookmarks'):
query_kwargs['bookmarked_by'] = request.user
if request.user.is_authenticated() and request.GET.get('assigned'):
query_kwargs['assigned_to'] = request.user
sort_by = request.GET.get('sort')
if sort_by is None:
sort_by = DEFAULT_SORT_OPTION
query_kwargs['sort_by'] = sort_by
tags = {}
for tag_key in TagKey.objects.all_keys(project):
if request.GET.get(tag_key):
tags[tag_key] = request.GET[tag_key]
if tags:
query_kwargs['tags'] = tags
limit = request.GET.get('limit')
if limit:
try:
query_kwargs['limit'] = int(limit)
except ValueError:
raise ValidationError('invalid limit')
# TODO: proper pagination support
cursor = request.GET.get('cursor')
if cursor:
query_kwargs['cursor'] = Cursor.from_string(cursor)
query = request.GET.get('query', 'is:unresolved').strip()
if query:
query_kwargs.update(parse_query(project, query, request.user))
return query_kwargs
# bookmarks=0/1
# status=<x>
# <tag>=<value>
# statsPeriod=24h
@attach_scenarios([list_project_issues_scenario])
def get(self, request, project):
"""
List a Project's Issues
```````````````````````
Return a list of issues (groups) bound to a project. All parameters are
supplied as query string parameters.
        A default query of ``is:unresolved`` is applied. To return results
        with other statuses send a new query value (i.e. ``?query=`` for all
        results).
The ``statsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam bool shortIdLookup: if this is set to true then short IDs are
looked up by this function as well. This
can cause the return value of the function
to return an event issue of a different
project which is why this is an opt-in.
Set to `1` to enable.
        :qparam querystring query: an optional Sentry structured search
                                   query. If not provided an implied
                                   ``"is:unresolved"`` is assumed.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
stats_period = request.GET.get('statsPeriod')
if stats_period not in (None, '', '24h', '14d'):
return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
elif stats_period is None:
# default
stats_period = '24h'
elif stats_period == '':
# disable stats
stats_period = None
query = request.GET.get('query', '').strip()
if query:
matching_group = None
if len(query) == 32:
# check to see if we've got an event ID
try:
mapping = EventMapping.objects.get(
project_id=project.id,
event_id=query,
)
except EventMapping.DoesNotExist:
pass
else:
matching_group = Group.objects.get(id=mapping.group_id)
# If the query looks like a short id, we want to provide some
# information about where that is. Note that this can return
# results for another project. The UI deals with this.
elif request.GET.get('shortIdLookup') == '1' and \
looks_like_short_id(query):
try:
matching_group = Group.objects.by_qualified_short_id(
project.organization, query)
except Group.DoesNotExist:
matching_group = None
if matching_group is not None:
response = Response(serialize(
[matching_group], request.user, StreamGroupSerializer(
stats_period=stats_period
)
))
response['X-Sentry-Direct-Hit'] = '1'
return response
try:
query_kwargs = self._build_query_params_from_request(request, project)
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
cursor_result = search.query(**query_kwargs)
results = list(cursor_result)
context = serialize(
results, request.user, StreamGroupSerializer(
stats_period=stats_period
)
)
# HACK: remove auto resolved entries
if query_kwargs.get('status') == GroupStatus.UNRESOLVED:
context = [
r for r in context
if r['status'] == 'unresolved'
]
response = Response(context)
response['Link'] = ', '.join([
self.build_cursor_link(request, 'previous', cursor_result.prev),
self.build_cursor_link(request, 'next', cursor_result.next),
])
return response
@attach_scenarios([bulk_update_issues_scenario])
def put(self, request, project):
"""
Bulk Mutate a List of Issues
````````````````````````````
Bulk mutate various attributes on issues. The list of issues
to modify is given through the `id` query parameter. It is repeated
for each issue that should be modified.
- For non-status updates, the `id` query parameter is required.
- For status updates, the `id` query parameter may be omitted
for a batch "update all" query.
- An optional `status` query parameter may be used to restrict
mutations to only events with the given status.
        The following attributes can be modified and are supplied as a
JSON object in the body:
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be mutated. This
parameter shall be repeated for each issue. It
is optional only if a status is mutated in which
case an implicit `update all` is assumed.
:qparam string status: optionally limits the query to issues of the
specified status. Valid values are
``"resolved"``, ``"unresolved"`` and
``"muted"``.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:param string status: the new status for the issues. Valid values
are ``"resolved"``, ``"unresolved"`` and
``"muted"``.
:param int snoozeDuration: the number of minutes to mute this issue.
:param boolean isPublic: sets the issue to public or private.
:param boolean merge: allows to merge or unmerge different issues.
:param boolean hasSeen: in case this API call is invoked with a user
context this allows changing of the flag
that indicates if the user has seen the
event.
:param boolean isBookmarked: in case this API call is invoked with a
user context this allows changing of
the bookmark flag.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = Group.objects.filter(project=project, id__in=group_ids)
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
if not group_ids:
return Response(status=204)
else:
group_list = None
serializer = GroupSerializer(data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = dict(serializer.object)
acting_user = request.user if request.user.is_authenticated() else None
if not group_ids:
try:
query_kwargs = self._build_query_params_from_request(request, project)
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
# bulk mutations are limited to 1000 items
# TODO(dcramer): it'd be nice to support more than this, but its
# a bit too complicated right now
query_kwargs['limit'] = 1000
cursor_result = search.query(**query_kwargs)
group_list = list(cursor_result)
group_ids = [g.id for g in group_list]
is_bulk = len(group_ids) > 1
queryset = Group.objects.filter(
id__in=group_ids,
)
if result.get('status') == 'resolvedInNextRelease':
try:
release = Release.objects.filter(
project=project,
).order_by('-date_added')[0]
except IndexError:
                return Response('{"detail": "No release data present in the system to form a basis for \'Next Release\'"}', status=400)
now = timezone.now()
for group in group_list:
try:
with transaction.atomic():
resolution, created = GroupResolution.objects.create(
group=group,
release=release,
), True
except IntegrityError:
resolution, created = GroupResolution.objects.get(
group=group,
), False
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
if created:
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_RESOLVED_IN_RELEASE,
user=acting_user,
ident=resolution.id,
data={
# no version yet
'version': '',
}
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
activity.send_notification()
queryset.update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
result.update({
'status': 'resolved',
'statusDetails': {
'inNextRelease': True,
},
})
elif result.get('status') == 'resolved':
now = timezone.now()
happened = queryset.exclude(
status=GroupStatus.RESOLVED,
).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
GroupResolution.objects.filter(
group__in=group_ids,
).delete()
if group_list and happened:
for group in group_list:
group.status = GroupStatus.RESOLVED
group.resolved_at = now
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
activity = Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_RESOLVED,
user=acting_user,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
activity.send_notification()
result['statusDetails'] = {}
elif result.get('status'):
new_status = STATUS_CHOICES[result['status']]
happened = queryset.exclude(
status=new_status,
).update(
status=new_status,
)
GroupResolution.objects.filter(
group__in=group_ids,
).delete()
if new_status == GroupStatus.MUTED:
snooze_duration = result.pop('snoozeDuration', None)
if snooze_duration:
snooze_until = timezone.now() + timedelta(
minutes=snooze_duration,
)
for group in group_list:
GroupSnooze.objects.create_or_update(
group=group,
values={
'until': snooze_until,
}
)
result['statusDetails'] = {
'snoozeUntil': snooze_until,
}
else:
GroupSnooze.objects.filter(
group__in=group_ids,
).delete()
snooze_until = None
result['statusDetails'] = {}
else:
result['statusDetails'] = {}
if group_list and happened:
if new_status == GroupStatus.UNRESOLVED:
activity_type = Activity.SET_UNRESOLVED
activity_data = {}
elif new_status == GroupStatus.MUTED:
activity_type = Activity.SET_MUTED
activity_data = {
'snoozeUntil': snooze_until,
'snoozeDuration': snooze_duration,
}
for group in group_list:
group.status = new_status
activity = Activity.objects.create(
project=group.project,
group=group,
type=activity_type,
user=acting_user,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
activity.send_notification()
if result.get('hasSeen') and project.member_set.filter(user=acting_user).exists():
for group in group_list:
instance, created = create_or_update(
GroupSeen,
group=group,
user=acting_user,
project=group.project,
values={
'last_seen': timezone.now(),
}
)
elif result.get('hasSeen') is False:
GroupSeen.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
if result.get('isBookmarked'):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=project,
group=group,
user=acting_user,
)
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.bookmark,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
# TODO(dcramer): we could make these more efficient by first
        # querying for which rows are present (if N > 2), flipping the flag
# on those rows, and then creating the missing rows
if result.get('isSubscribed') in (True, False):
is_subscribed = result['isSubscribed']
for group in group_list:
GroupSubscription.objects.create_or_update(
user=acting_user,
group=group,
project=project,
values={'is_active': is_subscribed},
)
if result.get('isPublic'):
queryset.update(is_public=True)
for group in group_list:
if group.is_public:
continue
group.is_public = True
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PUBLIC,
user=acting_user,
)
elif result.get('isPublic') is False:
queryset.update(is_public=False)
for group in group_list:
if not group.is_public:
continue
group.is_public = False
Activity.objects.create(
project=group.project,
group=group,
type=Activity.SET_PRIVATE,
user=acting_user,
)
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get('merge') and len(group_list) > 1:
primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0]
children = []
transaction_id = uuid4().hex
for group in group_list:
if group == primary_group:
continue
children.append(group)
group.update(status=GroupStatus.PENDING_MERGE)
merge_group.delay(
from_object_id=group.id,
to_object_id=primary_group.id,
transaction_id=transaction_id,
)
Activity.objects.create(
project=primary_group.project,
group=primary_group,
type=Activity.MERGE,
user=acting_user,
data={
'issues': [{'id': c.id} for c in children],
},
)
result['merge'] = {
'parent': six.text_type(primary_group.id),
'children': [six.text_type(g.id) for g in children],
}
return Response(result)
@attach_scenarios([bulk_remove_issues_scenario])
def delete(self, request, project):
"""
Bulk Remove a List of Issues
````````````````````````````
Permanently remove the given issues. The list of issues to
modify is given through the `id` query parameter. It is repeated
for each issue that should be removed.
Only queries by 'id' are accepted.
If any ids are out of scope this operation will succeed without
any data mutation.
:qparam int id: a list of IDs of the issues to be removed. This
parameter shall be repeated for each issue.
:pparam string organization_slug: the slug of the organization the
issues belong to.
:pparam string project_slug: the slug of the project the issues
belong to.
:auth: required
"""
group_ids = request.GET.getlist('id')
if group_ids:
group_list = list(Group.objects.filter(
project=project,
id__in=set(group_ids),
).exclude(
status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
]
))
# filter down group ids to only valid matches
group_ids = [g.id for g in group_list]
else:
# missing any kind of filter
return Response('{"detail": "You must specify a list of IDs for this operation"}', status=400)
if not group_ids:
return Response(status=204)
Group.objects.filter(
id__in=group_ids,
).exclude(
status__in=[
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
]
).update(status=GroupStatus.PENDING_DELETION)
GroupHash.objects.filter(group__id__in=group_ids).delete()
for group in group_list:
delete_group.apply_async(
kwargs={'object_id': group.id},
countdown=3600,
)
return Response(status=204)
| {
"repo_name": "fotinakis/sentry",
"path": "src/sentry/api/endpoints/project_group_index.py",
"copies": "1",
"size": "25800",
"license": "bsd-3-clause",
"hash": 3654244279494128600,
"line_mean": 37.1656804734,
"line_max": 144,
"alpha_frac": 0.5194573643,
"autogenerated": false,
"ratio": 4.856927710843373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00014469264340577232,
"num_lines": 676
} |
from __future__ import absolute_import, division, print_function
import six
from humancrypto.cli import main
from humancrypto import PrivateKey, PublicKey, Certificate, CSR
class Test_token(object):
def do(self, args, stdin=None):
stdout = six.StringIO()
stderr = six.StringIO()
if stdin:
stdin = six.StringIO(stdin)
main(args, stdin=stdin, stdout=stdout, stderr=stderr)
return stdout.getvalue(), stderr.getvalue()
def test_bytes(self):
self.do(['y2016', 'token'])
def test_hex(self):
self.do(['y2016', 'token', '--hex'])
def test_urlsafe(self):
self.do(['y2016', 'token', '--urlsafe'])
class Test_pw(object):
def do(self, args, stdin=None):
stdout = six.StringIO()
stderr = six.StringIO()
if stdin:
stdin = six.StringIO(stdin)
main(args, stdin=stdin, stdout=stdout, stderr=stderr)
return stdout.getvalue(), stderr.getvalue()
def test_store2016(self):
stored, _ = self.do(
['y2016', 'pw', 'store'],
stdin='password')
result, _ = self.do(
['y2016', 'pw', 'verify', stored],
stdin='password')
assert result == 'ok\n'
def test_store2016_wrong_password(self):
stored, _ = self.do(
['y2016', 'pw', 'store'],
stdin='password')
try:
self.do(
['y2016', 'pw', 'verify', stored],
stdin='wrong')
assert False, "Should have raised SystemExit"
except SystemExit as e:
assert e.code == 1, "Should exit with 1 cause wrong password"
def test_store44bc_old(self):
stored, _ = self.do(
['y44bc', 'pw', 'store'],
stdin='password')
try:
self.do(
['y2016', 'pw', 'verify', stored],
stdin='password')
assert False, "Should have raised SystemExit"
except SystemExit as e:
assert e.code == 2, "Should exit with 2 cause wrong year"
def test_store44BC(self):
stored, _ = self.do(
['y44bc', 'pw', 'store'],
stdin='password')
result, _ = self.do(
['y44bc', 'pw', 'verify', stored],
stdin='password')
assert result == 'ok\n'
class Test_rsa(object):
def do(self, args):
return main(['rsa'] + args)
def test_create_private(self, tmpdir):
keyfile = tmpdir.join('foo.key')
self.do(['create-private', keyfile.strpath])
key = PrivateKey.load(filename=keyfile.strpath)
assert key.key_size == 2048
def test_extract_public(self, tmpdir):
privfile = tmpdir.join('something.key')
pubfile = tmpdir.join('something.pub')
self.do(['create-private', privfile.strpath])
self.do(['extract-public', privfile.strpath, pubfile.strpath])
PublicKey.load(filename=pubfile.strpath)
def test_self_signed_cert(self, tmpdir):
keyfile = tmpdir.join('foo.key')
certfile = tmpdir.join('foo.crt')
self.do(['create-private', keyfile.strpath])
self.do([
'self-signed-cert', keyfile.strpath, certfile.strpath,
'--common-name', 'jim', '--state', six.u('CA'),
])
cert = Certificate.load(filename=certfile.strpath)
assert cert.issuer.attribs['common_name'] == u'jim'
assert cert.subject.attribs['common_name'] == u'jim'
assert cert.issuer.attribs['state'] == u'CA'
assert cert.subject.attribs['state'] == u'CA'
def test_create_csr(self, tmpdir):
keyfile = tmpdir.join('foo.key')
csrfile = tmpdir.join('foo.csr')
self.do(['create-private', keyfile.strpath])
self.do([
'create-csr', keyfile.strpath, csrfile.strpath,
'--common-name', 'jim', '--state', six.u('CA'),
])
csr = CSR.load(filename=csrfile.strpath)
assert csr.attribs['common_name'] == u'jim'
assert csr.attribs['state'] == u'CA'
def test_create_csr_extended_attrib(self, tmpdir):
keyfile = tmpdir.join('foo.key')
csrfile = tmpdir.join('foo.csr')
self.do(['create-private', keyfile.strpath])
self.do([
'create-csr', keyfile.strpath, csrfile.strpath,
'--common-name', 'jim',
'--subject-alternative-name', 'dns:jose',
])
csr = CSR.load(filename=csrfile.strpath)
assert csr.attribs['common_name'] == u'jim'
assert csr.extensions['subject_alternative_name']['dns'] == [u'jose']
def test_sign_csr(self, tmpdir):
cakey = tmpdir.join('ca.key')
cacrt = tmpdir.join('ca.crt')
otherkey = tmpdir.join('other.key')
othercsr = tmpdir.join('other.csr')
othercrt = tmpdir.join('other.crt')
self.do(['create-private', cakey.strpath])
self.do([
'self-signed-cert', cakey.strpath, cacrt.strpath,
'--common-name', 'bob',
'--state', 'WA',
])
self.do(['create-private', otherkey.strpath])
self.do([
'create-csr', otherkey.strpath, othercsr.strpath,
'--common-name', 'jim',
'--state', 'CA',
])
self.do([
'sign-csr', cakey.strpath, cacrt.strpath, othercsr.strpath,
othercrt.strpath,
])
cert = Certificate.load(filename=othercrt.strpath)
assert cert.issuer.attribs['common_name'] == u'bob'
assert cert.subject.attribs['common_name'] == u'jim'
assert cert.issuer.attribs['state'] == u'WA'
assert cert.subject.attribs['state'] == u'CA'
def test_sign_csr_server(self, tmpdir):
cakey = tmpdir.join('ca.key')
cacrt = tmpdir.join('ca.crt')
otherkey = tmpdir.join('other.key')
othercsr = tmpdir.join('other.csr')
othercrt = tmpdir.join('other.crt')
self.do(['create-private', cakey.strpath])
self.do([
'self-signed-cert', cakey.strpath, cacrt.strpath,
'--common-name', 'bob',
'--state', 'WA',
])
self.do(['create-private', otherkey.strpath])
self.do([
'create-csr', otherkey.strpath, othercsr.strpath,
'--common-name', 'jim',
'--state', 'CA',
'--server',
])
self.do([
'sign-csr', cakey.strpath, cacrt.strpath, othercsr.strpath,
othercrt.strpath,
])
cert = Certificate.load(filename=othercrt.strpath)
assert cert.issuer.attribs['common_name'] == u'bob'
assert cert.subject.attribs['common_name'] == u'jim'
assert cert.issuer.attribs['state'] == u'WA'
assert cert.subject.attribs['state'] == u'CA'
assert cert.extensions['key_usage']['key_encipherment'] is True
def test_sign_csr_client(self, tmpdir):
cakey = tmpdir.join('ca.key')
cacrt = tmpdir.join('ca.crt')
otherkey = tmpdir.join('other.key')
othercsr = tmpdir.join('other.csr')
othercrt = tmpdir.join('other.crt')
self.do(['create-private', cakey.strpath])
self.do([
'self-signed-cert', cakey.strpath, cacrt.strpath,
'--common-name', 'bob',
'--state', 'WA',
])
self.do(['create-private', otherkey.strpath])
self.do([
'create-csr', otherkey.strpath, othercsr.strpath,
'--common-name', 'jim', '--state', 'CA',
'--client',
])
self.do([
'sign-csr', cakey.strpath, cacrt.strpath, othercsr.strpath,
othercrt.strpath,
])
cert = Certificate.load(filename=othercrt.strpath)
assert cert.issuer.attribs['common_name'] == u'bob'
assert cert.subject.attribs['common_name'] == u'jim'
assert cert.issuer.attribs['state'] == u'WA'
assert cert.subject.attribs['state'] == u'CA'
assert cert.extensions['key_usage']['digital_signature'] is True
| {
"repo_name": "iffy/humancrypto",
"path": "tests/test_cli.py",
"copies": "1",
"size": "8066",
"license": "apache-2.0",
"hash": -7349612659865429000,
"line_mean": 34.5330396476,
"line_max": 77,
"alpha_frac": 0.5519464419,
"autogenerated": false,
"ratio": 3.671370050068275,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9723316491968275,
"avg_score": 0,
"num_lines": 227
} |
from __future__ import absolute_import, division, print_function
import six
from rest_framework.exceptions import ParseError, PermissionDenied
from rest_framework.response import Response
from sentry import features
from sentry.api.bases import OrganizationEventsEndpointBase, OrganizationEventPermission
from sentry.api.helpers.group_index import (
build_query_params_from_request,
calculate_stats_period,
rate_limit_endpoint,
)
from sentry.api.serializers import serialize
from sentry.api.serializers.models.group import StreamGroupSerializerSnuba
from sentry.api.utils import get_date_range_from_params, InvalidParams
from sentry.models import Group
from sentry.utils.compat import map
from sentry.api.endpoints.organization_group_index import ERR_INVALID_STATS_PERIOD
class OrganizationGroupIndexStatsEndpoint(OrganizationEventsEndpointBase):
permission_classes = (OrganizationEventPermission,)
@rate_limit_endpoint(limit=10, window=1)
def get(self, request, organization):
"""
Get the stats on an Organization's Issues
        `````````````````````````````````````````
Return a list of issues (groups) with the requested stats. All parameters are
supplied as query string parameters.
:qparam list groups: A list of group ids
:qparam list expand: an optional list of strings to opt in to additional data. Supports `inbox`
:qparam list collapse: an optional list of strings to opt out of certain pieces of data. Supports `stats`, `lifetime`, `filtered`, and `base`
The ``groupStatsPeriod`` parameter can be used to select the timeline
stats which should be present. Possible values are: '' (disable),
'24h', '14d'
The ``statsPeriod`` parameter can be used to select a date window starting
from now. Ex. ``14d``.
The ``start`` and ``end`` parameters can be used to select an absolute
date period to fetch issues from.
:qparam string statsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam string groupStatsPeriod: an optional stat period (can be one of
``"24h"``, ``"14d"``, and ``""``).
:qparam string start: Beginning date. You must also provide ``end``.
:qparam string end: End date. You must also provide ``start``.
"""
stats_period = request.GET.get("groupStatsPeriod")
try:
start, end = get_date_range_from_params(request.GET)
except InvalidParams as e:
raise ParseError(detail=six.text_type(e))
expand = request.GET.getlist("expand", [])
collapse = request.GET.getlist("collapse", ["base"])
has_inbox = features.has("organizations:inbox", organization, actor=request.user)
has_workflow_owners = features.has(
"organizations:workflow-owners", organization=organization, actor=request.user
)
projects = self.get_projects(request, organization)
project_ids = [p.id for p in projects]
try:
group_ids = set(map(int, request.GET.getlist("groups")))
except ValueError:
raise ParseError(detail="Group ids must be integers")
if not group_ids:
raise ParseError(
detail="You should include `groups` with your request. (i.e. groups=1,2,3)"
)
else:
groups = list(Group.objects.filter(id__in=group_ids, project_id__in=project_ids))
if not groups:
raise ParseError(detail="No matching groups found")
elif len(groups) > 25:
raise ParseError(detail="Too many groups requested.")
elif any(g for g in groups if not request.access.has_project_access(g.project)):
raise PermissionDenied
if stats_period not in (None, "", "24h", "14d", "auto"):
raise ParseError(detail=ERR_INVALID_STATS_PERIOD)
stats_period, stats_period_start, stats_period_end = calculate_stats_period(
stats_period, start, end
)
environments = self.get_environments(request, organization)
query_kwargs = build_query_params_from_request(
request, organization, projects, environments
)
context = serialize(
groups,
request.user,
StreamGroupSerializerSnuba(
environment_ids=[env.id for env in environments],
stats_period=stats_period,
stats_period_start=stats_period_start,
stats_period_end=stats_period_end,
collapse=collapse,
expand=expand,
has_inbox=has_inbox,
has_workflow_owners=has_workflow_owners,
start=start,
end=end,
search_filters=query_kwargs["search_filters"]
if "search_filters" in query_kwargs
else None,
),
)
response = Response(context)
return response
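# Illustrative client-side sketch, not part of the endpoint above: the host,
# URL path, organization slug, auth token, and group ids are all placeholders.
def _example_issue_stats_request():
    import requests  # assumed available in the client environment
    return requests.get(
        "https://sentry.example.com/api/0/organizations/example-org/issues-stats/",
        params={"groups": [1, 2, 3], "expand": ["inbox"], "collapse": ["base"]},
        headers={"Authorization": "Bearer <token>"},
    ).json()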
| {
"repo_name": "beeftornado/sentry",
"path": "src/sentry/api/endpoints/organization_group_index_stats.py",
"copies": "1",
"size": "5115",
"license": "bsd-3-clause",
"hash": 132747826389853340,
"line_mean": 40.9262295082,
"line_max": 149,
"alpha_frac": 0.6246334311,
"autogenerated": false,
"ratio": 4.482909728308501,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.56075431594085,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import socket
import functools
import re
from warnings import warn
import collections
import flask
from flask import Blueprint, Flask, request, Response
try:
from bokeh.server.crossdomain import crossdomain
except ImportError:
def crossdomain(*args, **kwargs):
def wrapper(f):
@functools.wraps(f)
def wrapped(*a, **k):
return f(*a, **k)
return wrapped
return wrapper
from toolz import assoc, valmap
from datashape import Mono, discover, pprint
from datashape.predicates import iscollection, isscalar
from odo import odo
import blaze
from blaze import compute, resource
from blaze.expr import utils as expr_utils
from blaze.compute import compute_up
from .serialization import json, all_formats
from ..interactive import InteractiveSymbol, coerce_scalar
from ..expr import Expr, symbol
__all__ = 'Server', 'to_tree', 'from_tree'
# http://www.speedguide.net/port.php?port=6363
# http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers
DEFAULT_PORT = 6363
api = Blueprint('api', __name__)
pickle_extension_api = Blueprint('pickle_extension_api', __name__)
_no_default = object() # sentinel
def _get_option(option, options, default=_no_default):
try:
return options[option]
except KeyError:
if default is not _no_default:
return default
# Provides a more informative error message.
raise TypeError(
'The blaze api must be registered with {option}'.format(
option=option,
),
)
def _register_api(app, options, first_registration=False):
"""
Register the data with the blueprint.
"""
_get_data.cache[app] = _get_option('data', options)
_get_format.cache[app] = dict(
(f.name, f) for f in _get_option('formats', options)
)
_get_auth.cache[app] = (
_get_option('authorization', options, None) or (lambda a: True)
)
# Call the original register function.
Blueprint.register(api, app, options, first_registration)
api.register = _register_api
def _get_data():
"""
Retrieve the current application's data for use in the blaze server
endpoints.
"""
return _get_data.cache[flask.current_app]
_get_data.cache = {}
def _get_format(name):
return _get_format.cache[flask.current_app][name]
_get_format.cache = {}
def _get_auth():
return _get_auth.cache[flask.current_app]
_get_auth.cache = {}
def authorization(f):
@functools.wraps(f)
def authorized(*args, **kwargs):
if not _get_auth()(request.authorization):
return Response(
'bad auth token',
401,
{'WWW-Authenticate': 'Basic realm="Login Required"'},
)
return f(*args, **kwargs)
return authorized
def check_request(f):
@functools.wraps(f)
def check():
content_type = request.headers['content-type']
matched = mimetype_regex.match(content_type)
if matched is None:
return 'Unsupported serialization format %s' % content_type, 415
try:
serial = _get_format(matched.groups()[0])
except KeyError:
return (
"Unsupported serialization format '%s'" % matched.groups()[0],
415,
)
try:
payload = serial.loads(request.data)
except ValueError:
return ("Bad data. Got %s " % request.data, 400) # 400: Bad Request
return f(payload, serial)
return check
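# Requests accepted by ``check_request`` must carry a header of the form
# ``Content-Type: application/vnd.blaze+<format>`` (e.g. ``+json``); the
# captured format name selects one of the serializers registered on the app
# (see ``mimetype_regex`` below).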
class Server(object):
""" Blaze Data Server
Host local data through a web API
Parameters
----------
data : dict, optional
A dictionary mapping dataset name to any data format that blaze
understands.
formats : iterable, optional
An iterable of supported serialization formats. By default, the
server will support JSON.
A serialization format is an object that supports:
name, loads, and dumps.
authorization : callable, optional
A callable to be used to check the auth header from the client.
This callable should accept a single argument that will either be
None indicating that no header was passed, or an object
containing a username and password attribute. By default, all requests
are allowed.
Examples
--------
>>> from pandas import DataFrame
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Alice', 300],
... [4, 'Dennis', 400],
... [5, 'Bob', -500]],
... columns=['id', 'name', 'amount'])
>>> server = Server({'accounts': df})
>>> server.run() # doctest: +SKIP
"""
def __init__(self, data=None, formats=None, authorization=None):
app = self.app = Flask('blaze.server.server')
if data is None:
data = dict()
app.register_blueprint(
api,
data=data,
formats=formats if formats is not None else (json,),
authorization=authorization,
)
self.data = data
def run(self, port=DEFAULT_PORT, retry=False, **kwargs):
"""Run the server.
Parameters
----------
port : int, optional
The port to bind to.
retry : bool, optional
If the port is busy, should we retry with the next available port?
**kwargs
Forwarded to the underlying flask app's ``run`` method.
Notes
-----
This function blocks forever when successful.
"""
self.port = port
try:
# Blocks until the server is shut down.
self.app.run(port=port, **kwargs)
except socket.error:
if not retry:
raise
warn("Oops, couldn't connect on port %d. Is it busy?" % port)
# Attempt to start the server on a new port.
self.run(port=port + 1, retry=retry, **kwargs)
@api.route('/datashape', methods=['GET'])
@crossdomain(origin='*', methods=['GET'])
@authorization
def shape():
return pprint(discover(_get_data()), width=0)
def to_tree(expr, names=None):
""" Represent Blaze expression with core data structures
Transform a Blaze expression into a form using only strings, dicts, lists
and base types (int, float, datetime, ....) This form can be useful for
serialization.
Parameters
----------
expr : Expr
A Blaze expression
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> to_tree(t) # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> to_tree(t.x.sum()) # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Column',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
    Simplify the expression using an explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Column', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
from_tree
"""
if names and expr in names:
return names[expr]
if isinstance(expr, tuple):
return [to_tree(arg, names=names) for arg in expr]
if isinstance(expr, expr_utils._slice):
return to_tree(expr.as_slice(), names=names)
if isinstance(expr, slice):
return {'op': 'slice',
'args': [to_tree(arg, names=names) for arg in
[expr.start, expr.stop, expr.step]]}
elif isinstance(expr, Mono):
return str(expr)
elif isinstance(expr, InteractiveSymbol):
return to_tree(symbol(expr._name, expr.dshape), names)
elif isinstance(expr, Expr):
return {'op': type(expr).__name__,
'args': [to_tree(arg, names) for arg in expr._args]}
else:
return expr
def expression_from_name(name):
"""
>>> expression_from_name('By')
<class 'blaze.expr.split_apply_combine.By'>
>>> expression_from_name('And')
<class 'blaze.expr.arithmetic.And'>
"""
import blaze
if hasattr(blaze, name):
return getattr(blaze, name)
if hasattr(blaze.expr, name):
return getattr(blaze.expr, name)
for signature, func in compute_up.funcs.items():
try:
if signature[0].__name__ == name:
return signature[0]
except TypeError:
pass
raise ValueError('%s not found in compute_up' % name)
def from_tree(expr, namespace=None):
""" Convert core data structures to Blaze expression
Core data structure representations created by ``to_tree`` are converted
back into Blaze expressions.
Parameters
----------
expr : dict
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> tree = to_tree(t)
>>> tree # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> from_tree(tree)
t
>>> tree = to_tree(t.x.sum())
>>> tree # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Field',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
>>> from_tree(tree)
sum(t.x)
    Simplify the expression using an explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Field', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
to_tree
"""
if isinstance(expr, dict):
op, args = expr['op'], expr['args']
if 'slice' == op:
return expr_utils._slice(*[from_tree(arg, namespace)
for arg in args])
if hasattr(blaze.expr, op):
cls = getattr(blaze.expr, op)
else:
cls = expression_from_name(op)
if 'Symbol' in op:
children = [from_tree(arg) for arg in args]
else:
children = [from_tree(arg, namespace) for arg in args]
return cls(*children)
elif isinstance(expr, list):
return tuple(from_tree(arg, namespace) for arg in expr)
if namespace and expr in namespace:
return namespace[expr]
else:
return expr
mimetype_regex = re.compile(r'^application/vnd\.blaze\+(%s)$' %
'|'.join(x.name for x in all_formats))
@api.route('/compute', methods=['POST', 'HEAD', 'OPTIONS'])
@crossdomain(origin='*', methods=['POST', 'HEAD', 'OPTIONS'])
@authorization
@check_request
def compserver(payload, serial):
ns = payload.get('namespace', dict())
compute_kwargs = payload.get('compute_kwargs') or {}
odo_kwargs = payload.get('odo_kwargs') or {}
dataset = _get_data()
ns[':leaf'] = symbol('leaf', discover(dataset))
expr = from_tree(payload['expr'], namespace=ns)
assert len(expr._leaves()) == 1
leaf = expr._leaves()[0]
try:
result = compute(expr, {leaf: dataset}, **compute_kwargs)
if iscollection(expr.dshape):
result = odo(result, list, **odo_kwargs)
elif isscalar(expr.dshape):
result = coerce_scalar(result, str(expr.dshape))
except NotImplementedError as e:
# 501: Not Implemented
return ("Computation not supported:\n%s" % e, 501)
except Exception as e:
# 500: Internal Server Error
return (
"Computation failed with message:\n%s: %s" % (type(e).__name__, e),
500,
)
return serial.dumps({
'datashape': pprint(expr.dshape, width=0),
'data': result,
'names': expr.fields
})
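# Illustrative sketch of the payload a client POSTs to /compute; the schema
# and the aggregation below are placeholders, not dictated by this module.
def _example_compute_payload():
    t = symbol('t', 'var * {id: int64, name: string, amount: int64}')
    tree = to_tree(t.amount.sum(), names={t: ':leaf'})
    payload = {'expr': tree}  # 'compute_kwargs' and 'odo_kwargs' are optional
    return json.dumps(payload)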
@api.route('/add', methods=['POST', 'HEAD', 'OPTIONS'])
@crossdomain(origin='*', methods=['POST', 'HEAD', 'OPTIONS'])
@authorization
@check_request
def addserver(payload, serial):
"""Add a data resource to the server.
    The request should contain a serialized MutableMapping (dictionary-like)
object, and the server should already be hosting a MutableMapping
resource.
"""
data = _get_data.cache[flask.current_app]
data_not_mm_msg = ("Cannot update blaze server data since its current data"
" is a %s and not a mutable mapping (dictionary like).")
if not isinstance(data, collections.MutableMapping):
return (data_not_mm_msg % type(data), 422)
payload_not_mm_msg = ("Cannot update blaze server with a %s payload, since"
" it is not a mutable mapping (dictionary like).")
if not isinstance(payload, collections.MutableMapping):
return (payload_not_mm_msg % type(payload), 422)
data.update(valmap(resource, payload))
return 'OK'
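# An /add request body is a serialized mapping from new dataset names to
# values that ``odo.resource`` understands, e.g. (illustrative)
# {'iris': 'sqlite:///data.db::iris'}; each value is passed through
# ``resource`` before being merged into the server's data.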
| {
"repo_name": "cowlicks/blaze",
"path": "blaze/server/server.py",
"copies": "2",
"size": "13284",
"license": "bsd-3-clause",
"hash": -971662984577634400,
"line_mean": 27.6911447084,
"line_max": 81,
"alpha_frac": 0.5701595905,
"autogenerated": false,
"ratio": 3.888758782201405,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5458918372701406,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import socket
import functools
import re
import flask
from flask import Blueprint, Flask, request, Response
try:
from bokeh.server.crossdomain import crossdomain
except ImportError:
def crossdomain(*args, **kwargs):
def wrapper(f):
@functools.wraps(f)
def wrapped(*a, **k):
return f(*a, **k)
return wrapped
return wrapper
from toolz import assoc
from datashape import Mono, discover
from datashape.predicates import iscollection, isscalar
from odo import odo
import blaze
from blaze import compute
from blaze.expr import utils as expr_utils
from blaze.compute import compute_up
from .serialization import json, all_formats
from ..interactive import InteractiveSymbol, coerce_scalar
from ..expr import Expr, symbol
__all__ = 'Server', 'to_tree', 'from_tree'
# http://www.speedguide.net/port.php?port=6363
# http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers
DEFAULT_PORT = 6363
api = Blueprint('api', __name__)
pickle_extension_api = Blueprint('pickle_extension_api', __name__)
_no_default = object() # sentinel
def _get_option(option, options, default=_no_default):
try:
return options[option]
except KeyError:
if default is not _no_default:
return default
# Provides a more informative error message.
raise TypeError(
'The blaze api must be registered with {option}'.format(
option=option,
),
)
def _register_api(app, options, first_registration=False):
"""
Register the data with the blueprint.
"""
_get_data.cache[app] = _get_option('data', options)
_get_format.cache[app] = dict(
(f.name, f) for f in _get_option('formats', options)
)
_get_auth.cache[app] = (
_get_option('authorization', options, None) or (lambda a: True)
)
# Call the original register function.
Blueprint.register(api, app, options, first_registration)
api.register = _register_api
def _get_data():
"""
Retrieve the current application's data for use in the blaze server
endpoints.
"""
return _get_data.cache[flask.current_app]
_get_data.cache = {}
def _get_format(name):
return _get_format.cache[flask.current_app][name]
_get_format.cache = {}
def _get_auth():
return _get_auth.cache[flask.current_app]
_get_auth.cache = {}
def authorization(f):
@functools.wraps(f)
def authorized(*args, **kwargs):
if not _get_auth()(request.authorization):
return Response(
'bad auth token',
401,
{'WWW-Authenticate': 'Basic realm="Login Required"'},
)
return f(*args, **kwargs)
return authorized
class Server(object):
""" Blaze Data Server
Host local data through a web API
Parameters
----------
data : dict, optional
A dictionary mapping dataset name to any data format that blaze
understands.
formats : iterable, optional
An iterable of supported serialization formats. By default, the
server will support JSON.
A serialization format is an object that supports:
name, loads, and dumps.
authorization : callable, optional
A callable to be used to check the auth header from the client.
This callable should accept a single argument that will either be
None indicating that no header was passed, or an object
containing a username and password attribute. By default, all requests
are allowed.
Examples
--------
>>> from pandas import DataFrame
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Alice', 300],
... [4, 'Dennis', 400],
... [5, 'Bob', -500]],
... columns=['id', 'name', 'amount'])
>>> server = Server({'accounts': df})
>>> server.run() # doctest: +SKIP
"""
__slots__ = 'app', 'data', 'port'
def __init__(self, data=None, formats=None, authorization=None):
app = self.app = Flask('blaze.server.server')
if data is None:
data = dict()
app.register_blueprint(
api,
data=data,
formats=formats if formats is not None else (json,),
authorization=authorization,
)
self.data = data
def run(self, *args, **kwargs):
"""Run the server"""
port = kwargs.pop('port', DEFAULT_PORT)
self.port = port
try:
self.app.run(*args, port=port, **kwargs)
except socket.error:
print("\tOops, couldn't connect on port %d. Is it busy?" % port)
if kwargs.get('retry', True):
# Attempt to start the server on a new port.
self.run(*args, **assoc(kwargs, 'port', port + 1))
@api.route('/datashape', methods=['GET'])
@crossdomain(origin='*', methods=['GET'])
@authorization
def shape():
return str(discover(_get_data()))
def to_tree(expr, names=None):
""" Represent Blaze expression with core data structures
Transform a Blaze expression into a form using only strings, dicts, lists
and base types (int, float, datetime, ....) This form can be useful for
serialization.
Parameters
----------
expr : Expr
A Blaze expression
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> to_tree(t) # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> to_tree(t.x.sum()) # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Column',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
    Simplify the expression using an explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Column', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
from_tree
"""
if names and expr in names:
return names[expr]
if isinstance(expr, tuple):
return [to_tree(arg, names=names) for arg in expr]
if isinstance(expr, expr_utils._slice):
return to_tree(expr.as_slice(), names=names)
if isinstance(expr, slice):
return {'op': 'slice',
'args': [to_tree(arg, names=names) for arg in
[expr.start, expr.stop, expr.step]]}
elif isinstance(expr, Mono):
return str(expr)
elif isinstance(expr, InteractiveSymbol):
return to_tree(symbol(expr._name, expr.dshape), names)
elif isinstance(expr, Expr):
return {'op': type(expr).__name__,
'args': [to_tree(arg, names) for arg in expr._args]}
else:
return expr
def expression_from_name(name):
"""
>>> expression_from_name('By')
<class 'blaze.expr.split_apply_combine.By'>
>>> expression_from_name('And')
<class 'blaze.expr.arithmetic.And'>
"""
import blaze
if hasattr(blaze, name):
return getattr(blaze, name)
if hasattr(blaze.expr, name):
return getattr(blaze.expr, name)
for signature, func in compute_up.funcs.items():
try:
if signature[0].__name__ == name:
return signature[0]
except TypeError:
pass
raise ValueError('%s not found in compute_up' % name)
def from_tree(expr, namespace=None):
""" Convert core data structures to Blaze expression
Core data structure representations created by ``to_tree`` are converted
back into Blaze expressions.
Parameters
----------
expr : dict
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> tree = to_tree(t)
>>> tree # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> from_tree(tree)
t
>>> tree = to_tree(t.x.sum())
>>> tree # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Field',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
>>> from_tree(tree)
sum(t.x)
    Simplify the expression using an explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Field', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
to_tree
"""
if isinstance(expr, dict):
op, args = expr['op'], expr['args']
if 'slice' == op:
return expr_utils._slice(*[from_tree(arg, namespace)
for arg in args])
if hasattr(blaze.expr, op):
cls = getattr(blaze.expr, op)
else:
cls = expression_from_name(op)
if 'Symbol' in op:
children = [from_tree(arg) for arg in args]
else:
children = [from_tree(arg, namespace) for arg in args]
return cls(*children)
elif isinstance(expr, list):
return tuple(from_tree(arg, namespace) for arg in expr)
if namespace and expr in namespace:
return namespace[expr]
else:
return expr
mimetype_regex = re.compile(r'^application/vnd\.blaze\+(%s)$' %
'|'.join(x.name for x in all_formats))
@api.route('/compute', methods=['POST', 'HEAD', 'OPTIONS'])
@crossdomain(origin='*', methods=['POST', 'HEAD', 'OPTIONS'])
@authorization
def compserver():
content_type = request.headers['content-type']
matched = mimetype_regex.match(content_type)
if matched is None:
return 'Unsupported serialization format %s' % content_type, 415
try:
serial = _get_format(matched.groups()[0])
except KeyError:
return (
"Unsupported serialization format '%s'" % matched.groups()[0],
415,
)
try:
payload = serial.loads(request.data)
except ValueError:
return ("Bad data. Got %s " % request.data, 400) # 400: Bad Request
ns = payload.get('namespace', dict())
dataset = _get_data()
ns[':leaf'] = symbol('leaf', discover(dataset))
expr = from_tree(payload['expr'], namespace=ns)
assert len(expr._leaves()) == 1
leaf = expr._leaves()[0]
try:
result = compute(expr, {leaf: dataset})
if iscollection(expr.dshape):
result = odo(result, list)
elif isscalar(expr.dshape):
result = coerce_scalar(result, str(expr.dshape))
except NotImplementedError as e:
# 501: Not Implemented
return ("Computation not supported:\n%s" % e, 501)
except Exception as e:
# 500: Internal Server Error
return ("Computation failed with message:\n%s" % e, 500)
return serial.dumps({
'datashape': str(expr.dshape),
'data': result,
'names': expr.fields
})
| {
"repo_name": "caseyclements/blaze",
"path": "blaze/server/server.py",
"copies": "10",
"size": "11382",
"license": "bsd-3-clause",
"hash": -1975865234724830200,
"line_mean": 26.8970588235,
"line_max": 78,
"alpha_frac": 0.5638727816,
"autogenerated": false,
"ratio": 3.8181818181818183,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9382054599781818,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import socket
from tornado import gen
from tornado.iostream import IOStream
from tornado.log import app_log
from tornado.stack_context import NullContext
from tornado.tcpserver import TCPServer
from tornado.test.util import skipBefore35, exec_test
from tornado.testing import AsyncTestCase, ExpectLog, bind_unused_port, gen_test
class TCPServerTest(AsyncTestCase):
@gen_test
def test_handle_stream_coroutine_logging(self):
# handle_stream may be a coroutine and any exception in its
# Future will be logged.
class TestServer(TCPServer):
@gen.coroutine
def handle_stream(self, stream, address):
yield stream.read_bytes(len(b'hello'))
stream.close()
1 / 0
server = client = None
try:
sock, port = bind_unused_port()
with NullContext():
server = TestServer()
server.add_socket(sock)
client = IOStream(socket.socket())
with ExpectLog(app_log, "Exception in callback"):
yield client.connect(('localhost', port))
yield client.write(b'hello')
yield client.read_until_close()
yield gen.moment
finally:
if server is not None:
server.stop()
if client is not None:
client.close()
@skipBefore35
@gen_test
def test_handle_stream_native_coroutine(self):
# handle_stream may be a native coroutine.
namespace = exec_test(globals(), locals(), """
class TestServer(TCPServer):
async def handle_stream(self, stream, address):
stream.write(b'data')
stream.close()
""")
sock, port = bind_unused_port()
server = namespace['TestServer']()
server.add_socket(sock)
client = IOStream(socket.socket())
yield client.connect(('localhost', port))
result = yield client.read_until_close()
self.assertEqual(result, b'data')
server.stop()
client.close()
def test_stop_twice(self):
sock, port = bind_unused_port()
server = TCPServer()
server.add_socket(sock)
server.stop()
server.stop()
@gen_test
def test_stop_in_callback(self):
# Issue #2069: calling server.stop() in a loop callback should not
# raise EBADF when the loop handles other server connection
# requests in the same loop iteration
class TestServer(TCPServer):
@gen.coroutine
def handle_stream(self, stream, address):
server.stop()
yield stream.read_until_close()
sock, port = bind_unused_port()
server = TestServer()
server.add_socket(sock)
server_addr = ('localhost', port)
N = 40
clients = [IOStream(socket.socket()) for i in range(N)]
connected_clients = []
@gen.coroutine
def connect(c):
try:
yield c.connect(server_addr)
except EnvironmentError:
pass
else:
connected_clients.append(c)
yield [connect(c) for c in clients]
self.assertGreater(len(connected_clients), 0,
"all clients failed connecting")
try:
if len(connected_clients) == N:
# Ideally we'd make the test deterministic, but we're testing
# for a race condition in combination with the system's TCP stack...
self.skipTest("at least one client should fail connecting "
"for the test to be meaningful")
finally:
for c in connected_clients:
c.close()
# Here tearDown() would re-raise the EBADF encountered in the IO loop
| {
"repo_name": "legnaleurc/tornado",
"path": "tornado/test/tcpserver_test.py",
"copies": "1",
"size": "3963",
"license": "apache-2.0",
"hash": 8972633873521718000,
"line_mean": 33.1637931034,
"line_max": 84,
"alpha_frac": 0.5738077214,
"autogenerated": false,
"ratio": 4.544724770642202,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002078481456439536,
"num_lines": 116
} |
from __future__ import absolute_import, division, print_function
import socket
import click
from construct import UBInt16
from tlsenum.parse_hello import (
ClientHello, Extensions, HandshakeFailure, ProtocolVersionFailure,
ServerHello, construct_sslv2_client_hello
)
from tlsenum.mappings import (
CipherSuites, ECCurves, ECPointFormat, TLSProtocolVersion
)
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
def send_client_hello(host, port, data):
"""
Sends a ClientHello message in bytes.
Returns a ServerHello message in bytes
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(data)
server_hello = s.recv(5)
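        # The first 5 bytes are the TLS record header; bytes 3-4 carry the
        # big-endian length of the handshake data that follows.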
# This is to handle the case where the server fails
        # to respond instead of returning a handshake failure.
if len(server_hello) == 0:
raise HandshakeFailure()
server_hello += s.recv(UBInt16("length").parse(server_hello[3:5]))
return server_hello
except socket.error:
raise HandshakeFailure()
def send_sslv2_client_hello(host, port):
"""
Sends a SSLv2 ClientHello message in bytes.
If server supports SSLv2, returns None. Else raise a HandshakeFailure().
"""
data = construct_sslv2_client_hello()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send(data)
server_hello = s.recv(3)
if len(server_hello) == 0:
raise HandshakeFailure()
        # Byte 2 of the server's response is the SSLv2 message type;
        # type 4 is SERVER-HELLO, anything else counts as a failure.
        if server_hello[2] != 4:
            raise HandshakeFailure()
except socket.error:
raise HandshakeFailure()
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument("host", type=click.STRING)
@click.argument("port", type=click.INT)
def cli(host, port):
"""
A command line tool to enumerate TLS cipher-suites supported by a server.
"""
cipher_suites_list = [i.name for i in CipherSuites]
extension = Extensions()
extension.sni = host
extension.ec_curves = [i.name for i in ECCurves]
extension.ec_point_format = [i.name for i in ECPointFormat]
client_hello = ClientHello()
client_hello.deflate = False
client_hello.extensions = extension.build()
client_hello.cipher_suites = cipher_suites_list
supported_tls_versions = []
try:
send_sslv2_client_hello(host, port)
supported_tls_versions.append("2.0")
except HandshakeFailure:
pass
for i in TLSProtocolVersion:
client_hello.protocol_version = i
try:
server_hello = send_client_hello(host, port, client_hello.build())
server_hello = ServerHello.parse_server_hello(server_hello)
except HandshakeFailure:
continue
except ProtocolVersionFailure:
continue
supported_tls_versions.append(server_hello.protocol_version)
supported_tls_versions = sorted(
list(set(supported_tls_versions)),
key=lambda x: 0 if x == "2.0" else TLSProtocolVersion.index(x) + 1
)
print("TLS Versions supported by server: {0}".format(
", ".join(supported_tls_versions)
))
client_hello.protocol_version = supported_tls_versions[-1]
client_hello.deflate = True
try:
server_hello = send_client_hello(host, port, client_hello.build())
server_hello = ServerHello.parse_server_hello(server_hello)
except HandshakeFailure:
pass
print("Deflate compression: {0}".format(
"Yes" if server_hello.deflate else "No"
))
client_hello.deflate = False
supported_cipher_suites = []
print("Supported Cipher suites in order of priority: ")
while True:
client_hello.cipher_suites = cipher_suites_list
try:
server_hello = send_client_hello(host, port, client_hello.build())
server_hello = ServerHello.parse_server_hello(server_hello)
except HandshakeFailure:
break
supported_cipher_suites.append(server_hello.cipher_suite)
cipher_suites_list.remove(server_hello.cipher_suite)
for i in supported_cipher_suites:
print(i)
| {
"repo_name": "Ayrx/tlsenum",
"path": "tlsenum/__init__.py",
"copies": "1",
"size": "4230",
"license": "mit",
"hash": 7694310272395043000,
"line_mean": 26.1153846154,
"line_max": 78,
"alpha_frac": 0.6472813239,
"autogenerated": false,
"ratio": 3.884297520661157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5031578844561156,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sqlalchemy as sa
from .compatibility import basestring
from .dispatch import dispatch
__all__ = ()
@dispatch(sa.Table, basestring)
def create_index(s, column, **kwargs):
"""Create an index for a single column.
Parameters
----------
s : sa.Table
The table to create the index on.
column : str
The name of the column to create an index on.
name : str
The name of the created index.
unique : bool, optional
Should the index be unique.
ignore_existing : bool, optional
        Should this suppress exceptions from the index already existing.
concurrently : bool, optional
Should the index be created without holding a lock. This feature is
postgres specific.
"""
return create_index(s, (column,), **kwargs)
@dispatch(sa.Table, (list, tuple))
def create_index(s,
columns,
name=None,
unique=False,
ignore_existing=False,
concurrently=False):
"""Create an index for a single column.
Parameters
----------
s : sa.Table
The table to create the index on.
columns : list[str] or tuple[str]
The names of the columns to create an index on.
name : str
The name of the created index.
unique : bool, optional
Should the index be unique.
ignore_existing : bool, optional
        Should this suppress exceptions from the index already existing.
concurrently : bool, optional
Should the index be created without holding a lock. This feature is
postgres specific.
"""
if name is None:
raise ValueError('SQL indexes must have a name')
try:
sa.Index(
name,
*(s.c[c] for c in columns),
unique=unique,
**{'postgresql_concurrently': concurrently} if concurrently else {}
).create(s.bind)
except (sa.exc.ProgrammingError, sa.exc.OperationalError):
# sqlite raises OperationalError but postgres raises ProgrammingError
# thanks fam
if not ignore_existing:
raise
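# Illustrative usage sketch (the engine URI, table, and index names are
# placeholders; assumes a pre-2.0 SQLAlchemy, as the module above does):
def _example_create_index():
    engine = sa.create_engine('sqlite:///:memory:')
    metadata = sa.MetaData(bind=engine)
    t = sa.Table('accounts', metadata,
                 sa.Column('name', sa.String),
                 sa.Column('amount', sa.Integer))
    metadata.create_all()
    create_index(t, 'name', name='idx_name')
    create_index(t, ['name', 'amount'], name='idx_name_amount', unique=True)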
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/sql.py",
"copies": "3",
"size": "2200",
"license": "bsd-3-clause",
"hash": 574500308309553400,
"line_mean": 29.1369863014,
"line_max": 79,
"alpha_frac": 0.6163636364,
"autogenerated": false,
"ratio": 4.564315352697095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6680678989097095,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import string
from functools import partial
from contextlib import contextmanager
from glue.external.six import PY2
from glue.external.six.moves import reduce
__all__ = ['DeferredMethod', 'nonpartial', 'lookup_class', 'as_variable_name',
'as_list', 'file_format', 'CallbackMixin', 'PropertySetMixin',
'Pointer', 'defer']
class DeferredMethod(object):
"""
This class stubs out a method, and provides a callable interface that logs
its calls. These can later be actually executed on the original
    (non-stubbed) method by calling execute_deferred_calls
"""
def __init__(self, method):
self.method = method
self.calls = [] # avoid hashability issues with dict/set
@property
def original_method(self):
return self.method
def __call__(self, instance, *a, **k):
if instance not in (c[0] for c in self.calls):
self.calls.append((instance, a, k))
def __get__(self, instance, owner):
if instance is None:
return self
return partial(self.__call__, instance)
def execute_deferred_calls(self):
for instance, args, kwargs in self.calls:
self.method(instance, *args, **kwargs)
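# Illustrative sketch of DeferredMethod (the Plot class is a placeholder):
# stub a method so calls are only logged, then replay them on the original.
def _example_deferred_method():
    class Plot(object):
        def refresh(self, reason=None):
            print('refreshing because', reason)
    Plot.refresh = DeferredMethod(Plot.refresh)
    p = Plot()
    p.refresh('data changed')          # logged, not executed yet
    deferred = Plot.refresh            # class access returns the stub itself
    Plot.refresh = deferred.original_method
    deferred.execute_deferred_calls()  # replays the logged call on ``p``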
def nonpartial(func, *args, **kwargs):
"""
Like functools.partial, this returns a function which, when called, calls
``func(*args, **kwargs)``.
Unlike functools.partial, extra arguments passed to the returned function
are *not* passed to the input function.
This is used when connecting slots to ``QAction.triggered`` signals, which
    appear to have different signatures and seem to add an extra argument
    in PyQt4 but not in PySide.
"""
def result(*a, **k):
return func(*args, **kwargs)
return result
def lookup_class(ref):
"""
Look up an object via its module string (e.g., 'glue.core.Data')
Parameters
----------
ref : str
The module string
"""
if PY2 and ref.startswith('builtins'):
ref = '.'.join(['__builtin__'] + ref.split('.')[1:])
elif not PY2 and ref.startswith('__builtin__'):
ref = '.'.join(['builtins'] + ref.split('.')[1:])
mod = ref.rsplit('.', 1)[0]
try:
result = __import__(mod)
except ImportError:
raise ValueError("Module '{0}' not found".format(mod))
try:
for attr in ref.split('.')[1:]:
result = getattr(result, attr)
return result
except AttributeError:
raise ValueError("Object '{0}' not found".format(ref))
def as_variable_name(x):
"""
Convert a string to a legal python variable name
Parameters
----------
x : str
A string to (possibly) rename
Returns
-------
variable_name : str
A legal Python variable name
"""
allowed = string.ascii_letters + string.digits + '_'
result = [letter if letter in allowed else '_' for letter in x or 'x']
if result[0] in string.digits:
result.insert(0, '_')
return ''.join(result)
def as_list(x):
if isinstance(x, list):
return x
return [x]
def file_format(filename):
if filename.find('.') == -1:
return ''
if filename.lower().endswith('.gz'):
result = filename.lower().rsplit('.', 2)[1]
else:
result = filename.lower().rsplit('.', 1)[1]
return result
class CallbackMixin(object):
"""
A mixin that provides a utility for attaching callback
functions to methods
"""
def __init__(self):
self._callbacks = []
def add_callback(self, function):
self._callbacks.append(function)
def remove_callback(self, function):
self._callbacks.remove(function)
def notify(self, *args, **kwargs):
for func in self._callbacks:
func(*args, **kwargs)
class PropertySetMixin(object):
"""An object that provides a set of properties that
are meant to encapsulate state information
This class exposes a properties attribute, which is a dict
of all properties. Similarly, assigning to the properties dict
will update the individual properties
"""
_property_set = []
@property
def properties(self):
""" A dict mapping property names to values """
return dict((p, getattr(self, p)) for p in self._property_set)
@properties.setter
def properties(self, value):
""" Update the properties with a new dict.
Keys in the new dict must be valid property names defined in
the _property_set class level attribute"""
invalid = set(value.keys()) - set(self._property_set)
if invalid:
raise ValueError("Invalid property values: %s" % invalid)
for k in self._property_set:
if k not in value:
continue
setattr(self, k, value[k])
class Pointer(object):
def __init__(self, key):
self.key = key
def __get__(self, instance, type=None):
val = instance
for k in self.key.split('.'):
val = getattr(val, k, None)
return val
def __set__(self, instance, value):
v = self.key.split('.')
attr = reduce(getattr, [instance] + v[:-1])
setattr(attr, v[-1], value)
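# Illustrative sketch of the ``Pointer`` descriptor (Layer/Style are
# placeholders): it proxies attribute access through a dotted path.
def _example_pointer():
    class Style(object):
        color = 'red'
    class Layer(object):
        style = Style()
        color = Pointer('style.color')
    layer = Layer()
    assert layer.color == 'red'
    layer.color = 'blue'
    assert layer.style.color == 'blue'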
# TODO: defer can be removed since it doesn't appear to be used anywhere
@contextmanager
def defer(instance, method):
"""
Defer the calling of a method inside a context manager,
and then call it 0 or 1 times afterwards.
:param instance: The instance of the method to defer
:param method: The name of the method to defer
:type method: str
Within the context block, calls to the method will be
intercepted, logged, and skipped.
Upon exiting the context block, the method will be
invoked a single time, with the arguments of the
    most recent invocation inside the context block.
If the method is never invoked in the context block,
it is not called when leaving that block.
"""
history = []
def log(*a, **k):
history.append((a, k))
orig = getattr(instance, method)
setattr(instance, method, log)
try:
yield
finally:
setattr(instance, method, orig)
for a, k in history[-1:]:
orig(*a, **k)
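# Illustrative sketch of ``defer`` (the Widget class is a placeholder): calls
# made inside the block are swallowed and the last one is replayed on exit.
def _example_defer():
    class Widget(object):
        def redraw(self, reason=None):
            print('redraw:', reason)
    w = Widget()
    with defer(w, 'redraw'):
        w.redraw('first')    # intercepted, not executed
        w.redraw('second')   # intercepted, not executed
    # leaving the block runs redraw exactly once, printing "redraw: second"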
| {
"repo_name": "stscieisenhamer/glue",
"path": "glue/utils/misc.py",
"copies": "3",
"size": "6340",
"license": "bsd-3-clause",
"hash": -3989931173730898000,
"line_mean": 26.094017094,
"line_max": 78,
"alpha_frac": 0.6124605678,
"autogenerated": false,
"ratio": 4.19035029742234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6302810865222339,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import string
from functools import partial
from contextlib import contextmanager
from glue.external.six.moves import reduce
__all__ = ['DeferredMethod', 'nonpartial', 'lookup_class', 'as_variable_name',
'as_list', 'file_format', 'CallbackMixin', 'PropertySetMixin',
'Pointer', 'defer']
class DeferredMethod(object):
"""
This class stubs out a method, and provides a callable interface that logs
its calls. These can later be actually executed on the original
    (non-stubbed) method by calling execute_deferred_calls
"""
def __init__(self, method):
self.method = method
self.calls = [] # avoid hashability issues with dict/set
@property
def original_method(self):
return self.method
def __call__(self, instance, *a, **k):
if instance not in (c[0] for c in self.calls):
self.calls.append((instance, a, k))
def __get__(self, instance, owner):
if instance is None:
return self
return partial(self.__call__, instance)
def execute_deferred_calls(self):
for instance, args, kwargs in self.calls:
self.method(instance, *args, **kwargs)
def nonpartial(func, *args, **kwargs):
"""
Like functools.partial, this returns a function which, when called, calls
``func(*args, **kwargs)``.
Unlike functools.partial, extra arguments passed to the returned function
are *not* passed to the input function.
This is used when connecting slots to ``QAction.triggered`` signals, which
    appear to have different signatures and seem to add an extra argument
    in PyQt4 but not in PySide.
"""
def result(*a, **k):
return func(*args, **kwargs)
return result
def lookup_class(ref):
"""
Look up an object via its module string (e.g., 'glue.core.Data')
Parameters
----------
ref : str
The module string
"""
mod = ref.rsplit('.', 1)[0]
try:
result = __import__(mod)
except ImportError:
raise ValueError("Module '{0}' not found".format(mod))
try:
for attr in ref.split('.')[1:]:
result = getattr(result, attr)
return result
except AttributeError:
raise ValueError("Object '{0}' not found".format(ref))
def as_variable_name(x):
"""
Convert a string to a legal python variable name
Parameters
----------
x : str
A string to (possibly) rename
Returns
-------
variable_name : str
A legal Python variable name
"""
allowed = string.ascii_letters + string.digits + '_'
result = [letter if letter in allowed else '_' for letter in x or 'x']
if result[0] in string.digits:
result.insert(0, '_')
return ''.join(result)
def as_list(x):
if isinstance(x, list):
return x
return [x]
def file_format(filename):
if filename.find('.') == -1:
return ''
if filename.lower().endswith('.gz'):
result = filename.lower().rsplit('.', 2)[1]
else:
result = filename.lower().rsplit('.', 1)[1]
return result
class CallbackMixin(object):
"""
A mixin that provides a utility for attaching callback
functions to methods
"""
def __init__(self):
self._callbacks = []
def add_callback(self, function):
self._callbacks.append(function)
def remove_callback(self, function):
self._callbacks.remove(function)
def notify(self, *args, **kwargs):
for func in self._callbacks:
func(*args, **kwargs)
class PropertySetMixin(object):
"""An object that provides a set of properties that
are meant to encapsulate state information
This class exposes a properties attribute, which is a dict
of all properties. Similarly, assigning to the properties dict
will update the individual properties
"""
_property_set = []
@property
def properties(self):
""" A dict mapping property names to values """
return dict((p, getattr(self, p)) for p in self._property_set)
@properties.setter
def properties(self, value):
""" Update the properties with a new dict.
Keys in the new dict must be valid property names defined in
the _property_set class level attribute"""
invalid = set(value.keys()) - set(self._property_set)
if invalid:
raise ValueError("Invalid property values: %s" % invalid)
for k in self._property_set:
if k not in value:
continue
setattr(self, k, value[k])
class Pointer(object):
def __init__(self, key):
self.key = key
def __get__(self, instance, type=None):
val = instance
for k in self.key.split('.'):
val = getattr(val, k, None)
return val
def __set__(self, instance, value):
v = self.key.split('.')
attr = reduce(getattr, [instance] + v[:-1])
setattr(attr, v[-1], value)
# TODO: defer can be removed since it doesn't appear to be used anywhere
@contextmanager
def defer(instance, method):
"""
Defer the calling of a method inside a context manager,
and then call it 0 or 1 times afterwards.
:param instance: The instance of the method to defer
:param method: The name of the method to defer
:type method: str
Within the context block, calls to the method will be
intercepted, logged, and skipped.
Upon exiting the context block, the method will be
invoked a single time, with the arguments of the
    most recent invocation inside the context block.
If the method is never invoked in the context block,
it is not called when leaving that block.
"""
history = []
def log(*a, **k):
history.append((a, k))
orig = getattr(instance, method)
setattr(instance, method, log)
try:
yield
finally:
setattr(instance, method, orig)
for a, k in history[-1:]:
orig(*a, **k)
| {
"repo_name": "saimn/glue",
"path": "glue/utils/misc.py",
"copies": "1",
"size": "6088",
"license": "bsd-3-clause",
"hash": -6025967637378438000,
"line_mean": 26.0577777778,
"line_max": 78,
"alpha_frac": 0.6153088042,
"autogenerated": false,
"ratio": 4.204419889502763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 225
} |
from __future__ import absolute_import, division, print_function
import string
from functools import partial
__all__ = ['DeferredMethod', 'nonpartial', 'lookup_class', 'as_variable_name',
'as_list', 'file_format']
class DeferredMethod(object):
"""
This class stubs out a method, and provides a
callable interface that logs its calls. These
can later be actually executed on the original (non-stubbed)
    method by calling execute_deferred_calls
"""
def __init__(self, method):
self.method = method
self.calls = [] # avoid hashability issues with dict/set
@property
def original_method(self):
return self.method
def __call__(self, instance, *a, **k):
if instance not in (c[0] for c in self.calls):
self.calls.append((instance, a, k))
def __get__(self, instance, owner):
if instance is None:
return self
return partial(self.__call__, instance)
def execute_deferred_calls(self):
for instance, args, kwargs in self.calls:
self.method(instance, *args, **kwargs)
def nonpartial(func, *args, **kwargs):
"""Like functools.partial, this returns a function which,
when called, calls func(*args, **kwargs). Unlike functools.partial,
extra arguments passed to the returned function are *not* passed
to the input function.
This is used when connecting slots to QAction.triggered signals,
    which appear to have different signatures and seem to add
    an extra argument in PyQt4 but not in PySide.
"""
def result(*a, **k):
return func(*args, **kwargs)
return result
def lookup_class(ref):
""" Look up an object via it's module string (e.g., 'glue.core.Data')
:param ref: reference
:type ref: str
:rtype: object, or None if not found
"""
mod = ref.split('.')[0]
try:
result = __import__(mod)
except ImportError:
return None
try:
for attr in ref.split('.')[1:]:
result = getattr(result, attr)
return result
except AttributeError:
return None
def as_variable_name(x):
"""
Convert a string to a legal python variable name
:param x: A string to (possibly) rename
:returns: A legal python variable name
"""
allowed = string.ascii_letters + string.digits + '_'
result = [letter if letter in allowed else '_' for letter in x or 'x']
if result[0] in string.digits:
result.insert(0, '_')
return ''.join(result)
def as_list(x):
if isinstance(x, list):
return x
return [x]
def file_format(filename):
if filename.find('.') == -1:
return ''
if filename.lower().endswith('.gz'):
result = filename.lower().rsplit('.', 2)[1]
else:
result = filename.lower().rsplit('.', 1)[1]
return result
| {
"repo_name": "JudoWill/glue",
"path": "glue/utils/misc.py",
"copies": "1",
"size": "2866",
"license": "bsd-3-clause",
"hash": -1409731516759245600,
"line_mean": 26.8252427184,
"line_max": 78,
"alpha_frac": 0.6186322401,
"autogenerated": false,
"ratio": 4.048022598870056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5166654838970056,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
from stripe import api_requestor
from stripe import util
from stripe.api_resources.abstract import ListableAPIResource
class File(ListableAPIResource):
OBJECT_NAME = "file"
    # This resource can have two different object names. In later API
# versions, only `file` is used, but since stripe-python may be used with
# any API version, we need to support deserializing the older
# `file_upload` object into the same class.
OBJECT_NAME_ALT = "file_upload"
@classmethod
def class_url(cls):
return "/v1/files"
@classmethod
def create(
# 'api_version' is deprecated, please use 'stripe_version'
cls,
api_key=None,
api_version=None,
stripe_version=None,
stripe_account=None,
**params
):
version = api_version or stripe_version
requestor = api_requestor.APIRequestor(
api_key,
api_base=stripe.upload_api_base,
api_version=version,
account=stripe_account,
)
url = cls.class_url()
supplied_headers = {"Content-Type": "multipart/form-data"}
response, api_key = requestor.request(
"post", url, params=params, headers=supplied_headers
)
return util.convert_to_stripe_object(
response, api_key, version, stripe_account
)
# For backwards compatibility, the `File` class is aliased to `FileUpload`.
FileUpload = File
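# Illustrative usage sketch (the API key and file path are placeholders):
def _example_file_upload():
    stripe.api_key = "sk_test_..."
    with open("/path/to/evidence.pdf", "rb") as fp:
        return File.create(purpose="dispute_evidence", file=fp)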
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/api_resources/file.py",
"copies": "1",
"size": "1539",
"license": "mit",
"hash": 4789180554177122000,
"line_mean": 29.78,
"line_max": 77,
"alpha_frac": 0.6419753086,
"autogenerated": false,
"ratio": 3.9461538461538463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 50
} |
from __future__ import absolute_import, division, print_function
import stripe
from stripe import oauth, six
from stripe import util
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import DeletableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
from stripe.api_resources.abstract import UpdateableAPIResource
from stripe.api_resources.abstract import custom_method
from stripe.api_resources.abstract import nested_resource_class_methods
from stripe.six.moves.urllib.parse import quote_plus
@custom_method("reject", http_verb="post")
@nested_resource_class_methods(
"capability",
operations=["retrieve", "update", "list"],
resource_plural="capabilities",
)
@nested_resource_class_methods(
"external_account",
operations=["create", "retrieve", "update", "delete", "list"],
)
@nested_resource_class_methods("login_link", operations=["create"])
@nested_resource_class_methods(
"person",
operations=["create", "retrieve", "update", "delete", "list"],
)
class Account(
CreateableAPIResource,
DeletableAPIResource,
ListableAPIResource,
UpdateableAPIResource,
):
OBJECT_NAME = "account"
def reject(self, idempotency_key=None, **params):
url = self.instance_url() + "/reject"
headers = util.populate_headers(idempotency_key)
self.refresh_from(self.request("post", url, params, headers))
return self
# We are not adding a helper for capabilities here as the Account object already has a
# capabilities property which is a hash and not the sub-list of capabilities.
@classmethod
def retrieve(cls, id=None, api_key=None, **params):
instance = cls(id, api_key, **params)
instance.refresh()
return instance
@classmethod
def modify(cls, id=None, **params):
url = cls._build_instance_url(id)
return cls._static_request("post", url, **params)
@classmethod
def _build_instance_url(cls, sid):
if not sid:
return "/v1/account"
sid = util.utf8(sid)
base = cls.class_url()
extn = quote_plus(sid)
return "%s/%s" % (base, extn)
def instance_url(self):
return self._build_instance_url(self.get("id"))
def persons(self, **params):
return self.request("get", self.instance_url() + "/persons", params)
def deauthorize(self, **params):
params["stripe_user_id"] = self.id
return oauth.OAuth.deauthorize(**params)
def serialize(self, previous):
params = super(Account, self).serialize(previous)
previous = previous or self._previous or {}
for k, v in six.iteritems(self):
if (
k == "individual"
and isinstance(v, stripe.api_resources.Person)
and k not in params
):
params[k] = v.serialize(previous.get(k, None))
return params
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/api_resources/account.py",
"copies": "1",
"size": "2962",
"license": "mit",
"hash": -3069902717757830700,
"line_mean": 32.2808988764,
"line_max": 90,
"alpha_frac": 0.6596893991,
"autogenerated": false,
"ratio": 3.892247043363995,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5051936442463995,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
from stripe.six import python_2_unicode_compatible
@python_2_unicode_compatible
class StripeError(Exception):
def __init__(
self,
message=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
):
super(StripeError, self).__init__(message)
if http_body and hasattr(http_body, "decode"):
try:
http_body = http_body.decode("utf-8")
except BaseException:
http_body = (
"<Could not decode body as utf-8. "
"Please report to support@stripe.com>"
)
self._message = message
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
self.headers = headers or {}
self.code = code
self.request_id = self.headers.get("request-id", None)
self.error = self.construct_error_object()
def __str__(self):
msg = self._message or "<empty message>"
if self.request_id is not None:
return u"Request {0}: {1}".format(self.request_id, msg)
else:
return msg
# Returns the underlying `Exception` (base class) message, which is usually
# the raw message returned by Stripe's API. This was previously available
# in python2 via `error.message`. Unlike `str(error)`, it omits "Request
# req_..." from the beginning of the string.
@property
def user_message(self):
return self._message
def __repr__(self):
return "%s(message=%r, http_status=%r, request_id=%r)" % (
self.__class__.__name__,
self._message,
self.http_status,
self.request_id,
)
def construct_error_object(self):
if (
self.json_body is None
or "error" not in self.json_body
or not isinstance(self.json_body["error"], dict)
):
return None
return stripe.api_resources.error_object.ErrorObject.construct_from(
self.json_body["error"], stripe.api_key
)
class APIError(StripeError):
pass
class APIConnectionError(StripeError):
def __init__(
self,
message,
http_body=None,
http_status=None,
json_body=None,
headers=None,
code=None,
should_retry=False,
):
super(APIConnectionError, self).__init__(
message, http_body, http_status, json_body, headers, code
)
self.should_retry = should_retry
class StripeErrorWithParamCode(StripeError):
def __repr__(self):
return (
"%s(message=%r, param=%r, code=%r, http_status=%r, "
"request_id=%r)"
% (
self.__class__.__name__,
self._message,
self.param,
self.code,
self.http_status,
self.request_id,
)
)
class CardError(StripeErrorWithParamCode):
def __init__(
self,
message,
param,
code,
http_body=None,
http_status=None,
json_body=None,
headers=None,
):
super(CardError, self).__init__(
message, http_body, http_status, json_body, headers, code
)
self.param = param
class IdempotencyError(StripeError):
pass
class InvalidRequestError(StripeErrorWithParamCode):
def __init__(
self,
message,
param,
code=None,
http_body=None,
http_status=None,
json_body=None,
headers=None,
):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body, headers, code
)
self.param = param
class AuthenticationError(StripeError):
pass
class PermissionError(StripeError):
pass
class RateLimitError(StripeError):
pass
class SignatureVerificationError(StripeError):
def __init__(self, message, sig_header, http_body=None):
super(SignatureVerificationError, self).__init__(message, http_body)
self.sig_header = sig_header
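# Illustrative sketch of how callers typically catch these errors; the charge
# parameters are placeholders.
def _example_error_handling():
    try:
        stripe.Charge.create(amount=2000, currency="usd", source="tok_visa")
    except CardError as e:
        return ("card declined", e.code, e.user_message)
    except RateLimitError:
        return ("rate limited; retry later",)
    except StripeError as e:
        return ("api error", e.request_id)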
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/error.py",
"copies": "1",
"size": "4284",
"license": "mit",
"hash": 5739444448933727000,
"line_mean": 25.1219512195,
"line_max": 79,
"alpha_frac": 0.5574229692,
"autogenerated": false,
"ratio": 4.049149338374291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5106572307574291,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
class TestCreateableAPIResource(object):
class MyCreatable(stripe.api_resources.abstract.CreateableAPIResource):
OBJECT_NAME = "mycreatable"
def test_create(self, request_mock):
request_mock.stub_request(
"post",
"/v1/mycreatables",
{"object": "charge", "foo": "bar"},
rheaders={"request-id": "req_id"},
)
res = self.MyCreatable.create()
request_mock.assert_requested("post", "/v1/mycreatables", {})
assert isinstance(res, stripe.Charge)
assert res.foo == "bar"
assert res.last_response is not None
assert res.last_response.request_id == "req_id"
def test_idempotent_create(self, request_mock):
request_mock.stub_request(
"post",
"/v1/mycreatables",
{"object": "charge", "foo": "bar"},
rheaders={"idempotency-key": "foo"},
)
res = self.MyCreatable.create(idempotency_key="foo")
request_mock.assert_requested(
"post", "/v1/mycreatables", {}, {"Idempotency-Key": "foo"}
)
assert isinstance(res, stripe.Charge)
assert res.foo == "bar"
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/abstract/test_createable_api_resource.py",
"copies": "1",
"size": "1269",
"license": "mit",
"hash": 2387483729871503000,
"line_mean": 29.9512195122,
"line_max": 75,
"alpha_frac": 0.5823483058,
"autogenerated": false,
"ratio": 3.6997084548104957,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4782056760610496,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
class TestDeletableAPIResource(object):
class MyDeletable(stripe.api_resources.abstract.DeletableAPIResource):
OBJECT_NAME = "mydeletable"
def test_delete_class(self, request_mock):
request_mock.stub_request(
"delete",
"/v1/mydeletables/mid",
{"id": "mid", "deleted": True},
rheaders={"request-id": "req_id"},
)
obj = self.MyDeletable.delete("mid")
request_mock.assert_requested("delete", "/v1/mydeletables/mid", {})
assert obj.deleted is True
assert obj.id == "mid"
assert obj.last_response is not None
assert obj.last_response.request_id == "req_id"
def test_delete_class_with_object(self, request_mock):
request_mock.stub_request(
"delete",
"/v1/mydeletables/mid",
{"id": "mid", "deleted": True},
rheaders={"request-id": "req_id"},
)
obj = self.MyDeletable.construct_from({"id": "mid"}, "mykey")
self.MyDeletable.delete(obj)
request_mock.assert_requested("delete", "/v1/mydeletables/mid", {})
assert obj.deleted is True
assert obj.id == "mid"
assert obj.last_response is not None
assert obj.last_response.request_id == "req_id"
def test_delete_instance(self, request_mock):
request_mock.stub_request(
"delete",
"/v1/mydeletables/mid",
{"id": "mid", "deleted": True},
rheaders={"request-id": "req_id"},
)
obj = self.MyDeletable.construct_from({"id": "mid"}, "mykey")
assert obj is obj.delete()
request_mock.assert_requested("delete", "/v1/mydeletables/mid", {})
assert obj.deleted is True
assert obj.id == "mid"
assert obj.last_response is not None
assert obj.last_response.request_id == "req_id"
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/abstract/test_deletable_api_resource.py",
"copies": "1",
"size": "1970",
"license": "mit",
"hash": -4502785029588460000,
"line_mean": 30.7741935484,
"line_max": 75,
"alpha_frac": 0.5812182741,
"autogenerated": false,
"ratio": 3.7099811676082863,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9791199441708286,
"avg_score": 0,
"num_lines": 62
} |
from __future__ import absolute_import, division, print_function
import stripe
class TestListableAPIResource(object):
class MyListable(stripe.api_resources.abstract.ListableAPIResource):
OBJECT_NAME = "mylistable"
def test_all(self, request_mock):
request_mock.stub_request(
"get",
"/v1/mylistables",
{
"object": "list",
"data": [
{"object": "charge", "name": "jose"},
{"object": "charge", "name": "curly"},
],
"url": "/v1/charges",
"has_more": False,
},
rheaders={"request-id": "req_id"},
)
res = self.MyListable.list()
request_mock.assert_requested("get", "/v1/mylistables", {})
assert len(res.data) == 2
assert all(isinstance(obj, stripe.Charge) for obj in res.data)
assert res.data[0].name == "jose"
assert res.data[1].name == "curly"
assert res.last_response is not None
assert res.last_response.request_id == "req_id"
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/abstract/test_listable_api_resource.py",
"copies": "1",
"size": "1106",
"license": "mit",
"hash": -3809928171207908000,
"line_mean": 31.5294117647,
"line_max": 72,
"alpha_frac": 0.5244122966,
"autogenerated": false,
"ratio": 3.8006872852233675,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4825099581823368,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "bpc_123"
class TestConfiguration(object):
def test_is_creatable(self, request_mock):
resource = stripe.billing_portal.Configuration.create(
business_profile={
"privacy_policy_url": "https://example.com/privacy",
"terms_of_service_url": "https://example.com/tos",
},
features={
"customer_update": {
"allowed_updates": ["address"],
"enabled": True,
}
},
)
request_mock.assert_requested(
"post", "/v1/billing_portal/configurations"
)
assert isinstance(resource, stripe.billing_portal.Configuration)
def test_is_retrievable(self, request_mock):
resource = stripe.billing_portal.Configuration.retrieve(
TEST_RESOURCE_ID
)
request_mock.assert_requested(
"get", "/v1/billing_portal/configurations/%s" % (TEST_RESOURCE_ID)
)
assert isinstance(resource, stripe.billing_portal.Configuration)
def test_is_modifiable(self, request_mock):
resource = stripe.billing_portal.Configuration.modify(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/billing_portal/configurations/%s" % (TEST_RESOURCE_ID)
)
assert isinstance(resource, stripe.billing_portal.Configuration)
def test_is_listable(self, request_mock):
resource = stripe.billing_portal.Configuration.list()
request_mock.assert_requested(
"get", "/v1/billing_portal/configurations"
)
assert isinstance(
resource.data[0], stripe.billing_portal.Configuration
)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/billing_portal/test_configuration.py",
"copies": "1",
"size": "1810",
"license": "mit",
"hash": -381930566739457600,
"line_mean": 34.4901960784,
"line_max": 79,
"alpha_frac": 0.6033149171,
"autogenerated": false,
"ratio": 4.180138568129331,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.528345348522933,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "ch_123"
class TestCharge(object):
def test_is_listable(self, request_mock):
resources = stripe.Charge.list()
request_mock.assert_requested("get", "/v1/charges")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.Charge)
def test_is_retrievable(self, request_mock):
resource = stripe.Charge.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/charges/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Charge)
def test_is_creatable(self, request_mock):
resource = stripe.Charge.create(
amount=100, currency="usd", source="tok_123"
)
request_mock.assert_requested("post", "/v1/charges")
assert isinstance(resource, stripe.Charge)
def test_is_saveable(self, request_mock):
resource = stripe.Charge.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
resource.save()
request_mock.assert_requested(
"post", "/v1/charges/%s" % TEST_RESOURCE_ID
)
def test_is_modifiable(self, request_mock):
resource = stripe.Charge.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/charges/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Charge)
def test_is_refundable(self, request_mock):
charge = stripe.Charge.retrieve(TEST_RESOURCE_ID)
resource = charge.refund()
request_mock.assert_requested(
"post", "/v1/charges/%s/refund" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Charge)
def test_can_capture(self, request_mock):
charge = stripe.Charge.retrieve(TEST_RESOURCE_ID)
resource = charge.capture()
request_mock.assert_requested(
"post", "/v1/charges/%s/capture" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Charge)
def test_can_capture_classmethod(self, request_mock):
resource = stripe.Charge.capture(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/charges/%s/capture" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Charge)
def test_can_update_dispute(self, request_mock):
charge = stripe.Charge.retrieve(TEST_RESOURCE_ID)
resource = charge.update_dispute()
request_mock.assert_requested(
"post", "/v1/charges/%s/dispute" % charge.id
)
assert isinstance(resource, stripe.Dispute)
def test_can_close_dispute(self, request_mock):
charge = stripe.Charge.retrieve(TEST_RESOURCE_ID)
resource = charge.close_dispute()
request_mock.assert_requested(
"post", "/v1/charges/%s/dispute/close" % charge.id
)
assert isinstance(resource, stripe.Dispute)
def test_can_mark_as_fraudulent(self, request_mock):
charge = stripe.Charge.retrieve(TEST_RESOURCE_ID)
resource = charge.mark_as_fraudulent()
request_mock.assert_requested(
"post",
"/v1/charges/%s" % charge.id,
{"fraud_details": {"user_report": "fraudulent"}},
)
assert isinstance(resource, stripe.Charge)
def test_can_mark_as_safe(self, request_mock):
charge = stripe.Charge.retrieve(TEST_RESOURCE_ID)
resource = charge.mark_as_safe()
request_mock.assert_requested(
"post",
"/v1/charges/%s" % charge.id,
{"fraud_details": {"user_report": "safe"}},
)
assert isinstance(resource, stripe.Charge)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_charge.py",
"copies": "1",
"size": "3795",
"license": "mit",
"hash": -2756975618088878600,
"line_mean": 35.4903846154,
"line_max": 64,
"alpha_frac": 0.6173913043,
"autogenerated": false,
"ratio": 3.6595949855351977,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9776986289835198,
"avg_score": 0,
"num_lines": 104
} |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "ipi_123"
class TestTransaction(object):
def test_is_listable(self, request_mock):
resources = stripe.issuing.Transaction.list()
request_mock.assert_requested("get", "/v1/issuing/transactions")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.issuing.Transaction)
def test_is_modifiable(self, request_mock):
resource = stripe.issuing.Transaction.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/issuing/transactions/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.issuing.Transaction)
def test_is_retrievable(self, request_mock):
resource = stripe.issuing.Transaction.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/issuing/transactions/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.issuing.Transaction)
def test_is_saveable(self, request_mock):
resource = stripe.issuing.Transaction.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
transaction = resource.save()
request_mock.assert_requested(
"post", "/v1/issuing/transactions/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.issuing.Transaction)
assert resource is transaction
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/issuing/test_transaction.py",
"copies": "1",
"size": "1515",
"license": "mit",
"hash": 5989643517415009000,
"line_mean": 36.875,
"line_max": 72,
"alpha_frac": 0.6646864686,
"autogenerated": false,
"ratio": 3.75,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.99146864686,
"avg_score": 0,
"num_lines": 40
} |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "pi_123"
class TestPaymentIntent(object):
def test_is_listable(self, request_mock):
resources = stripe.PaymentIntent.list()
request_mock.assert_requested("get", "/v1/payment_intents")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.PaymentIntent)
def test_is_retrievable(self, request_mock):
resource = stripe.PaymentIntent.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/payment_intents/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.PaymentIntent)
def test_is_creatable(self, request_mock):
resource = stripe.PaymentIntent.create(
amount="1234", currency="amount", payment_method_types=["card"]
)
request_mock.assert_requested("post", "/v1/payment_intents")
assert isinstance(resource, stripe.PaymentIntent)
def test_is_modifiable(self, request_mock):
resource = stripe.PaymentIntent.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post",
"/v1/payment_intents/%s" % TEST_RESOURCE_ID,
{"metadata": {"key": "value"}},
)
assert isinstance(resource, stripe.PaymentIntent)
def test_is_saveable(self, request_mock):
resource = stripe.PaymentIntent.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
resource.save()
request_mock.assert_requested(
"post",
"/v1/payment_intents/%s" % TEST_RESOURCE_ID,
{"metadata": {"key": "value"}},
)
assert isinstance(resource, stripe.PaymentIntent)
def test_can_cancel(self, request_mock):
resource = stripe.PaymentIntent.retrieve(TEST_RESOURCE_ID)
resource.cancel()
request_mock.assert_requested(
"post", "/v1/payment_intents/%s/cancel" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.PaymentIntent)
def test_can_cancel_classmethod(self, request_mock):
resource = stripe.PaymentIntent.cancel(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/payment_intents/%s/cancel" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.PaymentIntent)
def test_can_capture(self, request_mock):
resource = stripe.PaymentIntent.retrieve(TEST_RESOURCE_ID)
resource.capture()
request_mock.assert_requested(
"post", "/v1/payment_intents/%s/capture" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.PaymentIntent)
def test_can_capture_classmethod(self, request_mock):
resource = stripe.PaymentIntent.capture(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/payment_intents/%s/capture" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.PaymentIntent)
def test_can_confirm(self, request_mock):
resource = stripe.PaymentIntent.retrieve(TEST_RESOURCE_ID)
resource.confirm()
request_mock.assert_requested(
"post", "/v1/payment_intents/%s/confirm" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.PaymentIntent)
def test_can_confirm_classmethod(self, request_mock):
resource = stripe.PaymentIntent.confirm(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/payment_intents/%s/confirm" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.PaymentIntent)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_payment_intent.py",
"copies": "1",
"size": "3685",
"license": "mit",
"hash": 6618816053934221000,
"line_mean": 37.3854166667,
"line_max": 75,
"alpha_frac": 0.6415196744,
"autogenerated": false,
"ratio": 3.7678936605316973,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4909413334931697,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "po_123"
class TestPayout(object):
def test_is_listable(self, request_mock):
resources = stripe.Payout.list()
request_mock.assert_requested("get", "/v1/payouts")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.Payout)
def test_is_retrievable(self, request_mock):
resource = stripe.Payout.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/payouts/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_is_creatable(self, request_mock):
resource = stripe.Payout.create(amount=100, currency="usd")
request_mock.assert_requested("post", "/v1/payouts")
assert isinstance(resource, stripe.Payout)
def test_is_saveable(self, request_mock):
resource = stripe.Payout.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
resource.save()
request_mock.assert_requested(
"post", "/v1/payouts/%s" % TEST_RESOURCE_ID
)
def test_is_modifiable(self, request_mock):
resource = stripe.Payout.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/payouts/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_can_cancel(self, request_mock):
payout = stripe.Payout.retrieve(TEST_RESOURCE_ID)
resource = payout.cancel()
request_mock.assert_requested(
"post", "/v1/payouts/%s/cancel" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_can_cancel_classmethod(self, request_mock):
resource = stripe.Payout.cancel(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/payouts/%s/cancel" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_can_reverse(self, request_mock):
payout = stripe.Payout.retrieve(TEST_RESOURCE_ID)
resource = payout.reverse()
request_mock.assert_requested(
"post", "/v1/payouts/%s/reverse" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
def test_can_reverse_classmethod(self, request_mock):
resource = stripe.Payout.reverse(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/payouts/%s/reverse" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Payout)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_payout.py",
"copies": "1",
"size": "2645",
"license": "mit",
"hash": 7818788064122867000,
"line_mean": 35.2328767123,
"line_max": 67,
"alpha_frac": 0.6325141777,
"autogenerated": false,
"ratio": 3.598639455782313,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4731153633482313,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "price_123"
class TestPrice(object):
def test_is_listable(self, request_mock):
resources = stripe.Price.list()
request_mock.assert_requested("get", "/v1/prices")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.Price)
def test_is_retrievable(self, request_mock):
resource = stripe.Price.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/prices/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Price)
def test_is_creatable(self, request_mock):
resource = stripe.Price.create(
unit_amount=1000,
currency="usd",
recurring={"interval": "month"},
product_data={"name": "price_nickname"},
)
request_mock.assert_requested("post", "/v1/prices")
assert isinstance(resource, stripe.Price)
def test_is_saveable(self, request_mock):
resource = stripe.Price.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
resource.save()
request_mock.assert_requested(
"post", "/v1/prices/%s" % TEST_RESOURCE_ID
)
def test_is_modifiable(self, request_mock):
resource = stripe.Price.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/prices/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Price)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_price.py",
"copies": "1",
"size": "1604",
"license": "mit",
"hash": -5493564752895644000,
"line_mean": 32.4166666667,
"line_max": 64,
"alpha_frac": 0.6109725686,
"autogenerated": false,
"ratio": 3.7741176470588234,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48850902156588233,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "promo_123"
class TestPromotionCode(object):
def test_is_listable(self, request_mock):
resources = stripe.PromotionCode.list()
request_mock.assert_requested("get", "/v1/promotion_codes")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.PromotionCode)
def test_is_retrievable(self, request_mock):
resource = stripe.PromotionCode.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/promotion_codes/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.PromotionCode)
def test_is_creatable(self, request_mock):
resource = stripe.PromotionCode.create(coupon="co_123", code="MYCODE")
request_mock.assert_requested("post", "/v1/promotion_codes")
assert isinstance(resource, stripe.PromotionCode)
def test_is_saveable(self, request_mock):
resource = stripe.PromotionCode.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
resource.save()
request_mock.assert_requested(
"post", "/v1/promotion_codes/%s" % TEST_RESOURCE_ID
)
def test_is_modifiable(self, request_mock):
resource = stripe.PromotionCode.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/promotion_codes/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.PromotionCode)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_promotion_code.py",
"copies": "1",
"size": "1594",
"license": "mit",
"hash": 7691864368005067000,
"line_mean": 36.0697674419,
"line_max": 78,
"alpha_frac": 0.6562107905,
"autogenerated": false,
"ratio": 3.6475972540045767,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9803808044504576,
"avg_score": 0,
"num_lines": 43
} |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "si_123"
class TestSubscriptionItem(object):
def test_is_listable(self, request_mock):
resources = stripe.SubscriptionItem.list(subscription="sub_123")
request_mock.assert_requested(
"get", "/v1/subscription_items", {"subscription": "sub_123"}
)
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.SubscriptionItem)
def test_is_retrievable(self, request_mock):
resource = stripe.SubscriptionItem.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/subscription_items/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.SubscriptionItem)
def test_is_creatable(self, request_mock):
resource = stripe.SubscriptionItem.create(
price="price_123", subscription="sub_123"
)
request_mock.assert_requested("post", "/v1/subscription_items")
assert isinstance(resource, stripe.SubscriptionItem)
def test_is_saveable(self, request_mock):
resource = stripe.SubscriptionItem.retrieve(TEST_RESOURCE_ID)
resource.price = "price_123"
resource.save()
request_mock.assert_requested(
"post",
"/v1/subscription_items/%s" % TEST_RESOURCE_ID,
{"price": "price_123"},
)
def test_is_modifiable(self, request_mock):
resource = stripe.SubscriptionItem.modify(
TEST_RESOURCE_ID, price="price_123"
)
request_mock.assert_requested(
"post",
"/v1/subscription_items/%s" % TEST_RESOURCE_ID,
{"price": "price_123"},
)
assert isinstance(resource, stripe.SubscriptionItem)
def test_is_deletable(self, request_mock):
resource = stripe.SubscriptionItem.retrieve(TEST_RESOURCE_ID)
resource.delete()
request_mock.assert_requested(
"delete", "/v1/subscription_items/%s" % TEST_RESOURCE_ID
)
assert resource.deleted is True
def test_can_delete(self, request_mock):
resource = stripe.SubscriptionItem.delete(TEST_RESOURCE_ID)
request_mock.assert_requested(
"delete", "/v1/subscription_items/%s" % TEST_RESOURCE_ID
)
assert resource.deleted is True
class TestUsageRecords(object):
def test_is_creatable(self, request_mock):
resource = stripe.SubscriptionItem.create_usage_record(
TEST_RESOURCE_ID,
quantity=5000,
timestamp=1524182400,
action="increment",
)
request_mock.assert_requested(
"post",
"/v1/subscription_items/%s/usage_records" % TEST_RESOURCE_ID,
)
assert isinstance(resource, stripe.UsageRecord)
class TestUsageRecordSummaries(object):
def test_is_listable(self, request_mock):
resource = stripe.SubscriptionItem.list_usage_record_summaries(
TEST_RESOURCE_ID
)
request_mock.assert_requested(
"get",
"/v1/subscription_items/%s/usage_record_summaries"
% TEST_RESOURCE_ID,
)
assert isinstance(resource.data, list)
assert isinstance(resource.data[0], stripe.UsageRecordSummary)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_subscription_item.py",
"copies": "1",
"size": "3357",
"license": "mit",
"hash": 8901906822958276000,
"line_mean": 34.3368421053,
"line_max": 73,
"alpha_frac": 0.6276437295,
"autogenerated": false,
"ratio": 3.991676575505351,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.511932030500535,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "tu_123"
class TestTopup(object):
def test_is_listable(self, request_mock):
resources = stripe.Topup.list()
request_mock.assert_requested("get", "/v1/topups")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.Topup)
def test_is_retrievable(self, request_mock):
resource = stripe.Topup.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested(
"get", "/v1/topups/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Topup)
def test_is_creatable(self, request_mock):
resource = stripe.Topup.create(
amount=100,
currency="usd",
source="src_123",
description="description",
statement_descriptor="statement descriptor",
)
request_mock.assert_requested("post", "/v1/topups")
assert isinstance(resource, stripe.Topup)
def test_is_saveable(self, request_mock):
resource = stripe.Topup.retrieve(TEST_RESOURCE_ID)
resource.metadata["key"] = "value"
resource.save()
request_mock.assert_requested(
"post", "/v1/topups/%s" % TEST_RESOURCE_ID
)
def test_is_modifiable(self, request_mock):
resource = stripe.Topup.modify(
TEST_RESOURCE_ID, metadata={"key": "value"}
)
request_mock.assert_requested(
"post", "/v1/topups/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Topup)
def test_can_cancel(self, request_mock):
resource = stripe.Topup.retrieve(TEST_RESOURCE_ID)
resource = resource.cancel()
request_mock.assert_requested(
"post", "/v1/topups/%s/cancel" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Topup)
def test_can_cancel_classmethod(self, request_mock):
resource = stripe.Topup.cancel(TEST_RESOURCE_ID)
request_mock.assert_requested(
"post", "/v1/topups/%s/cancel" % TEST_RESOURCE_ID
)
assert isinstance(resource, stripe.Topup)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_topup.py",
"copies": "1",
"size": "2202",
"license": "mit",
"hash": -332674177955589060,
"line_mean": 33.40625,
"line_max": 64,
"alpha_frac": 0.6162579473,
"autogenerated": false,
"ratio": 3.6946308724832213,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4810888819783221,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import subprocess
import textwrap
from virtualenv._compat import check_output
from virtualenv.builders.base import BaseBuilder
_SCRIPT = """
import venv
# Create our actual builder with our settings.
builder = venv.EnvBuilder(
system_site_packages={system_site_packages!r},
symlinks=False,
upgrade=True,
)
# Make sure that pip is actually disabled.
builder.with_pip = False
# Don't install the activate scripts, we'll want to install our own
# instead.
builder.install_scripts = lambda *a, **kw: None
# Create the virtual environment.
builder.create({destination!r})
"""
class VenvBuilder(BaseBuilder):
@classmethod
def check_available(cls, python):
try:
check_output([
                # Bail if any Python has been Debian-ized: venv is broken beyond any workaround we can do here - resulting
# venvs will use "local/lib" instead of "lib" and have "dist-packages" instead of "site-packages"
# TODO: report this terrible mess upstream
python,
"-c",
textwrap.dedent("""
import venv, sysconfig
from sysconfig import get_scheme_names
from distutils.command.install import INSTALL_SCHEMES
if 'posix_local' in sysconfig.get_scheme_names() or 'deb_system' in INSTALL_SCHEMES:
raise RuntimeError("there are Debian patches")
""")
], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return False
else:
return True
def create_virtual_environment(self, destination):
# Create our script using our template and the given settings for
# this environment.
script = _SCRIPT.format(
system_site_packages=self.system_site_packages,
destination=destination,
)
# Subshell into the venv module and create a new virtual environment.
        # We use the programmatic API instead of the command line because we
        # don't want to include pip in this environment, and Python 3.3's venv
        # module doesn't support the --without-pip flag.
subprocess.check_call([self._python_bin, "-c", script])
| {
"repo_name": "ionelmc/virtualenv",
"path": "virtualenv/builders/venv.py",
"copies": "1",
"size": "2305",
"license": "mit",
"hash": -1167503207012512300,
"line_mean": 32.4057971014,
"line_max": 120,
"alpha_frac": 0.6407809111,
"autogenerated": false,
"ratio": 4.475728155339806,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5616509066439805,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
from functools import partial
import json as json_module
from datashape.predicates import iscollection, istabular, isscalar
from odo import odo
import pandas as pd
from pandas.compat import BytesIO
from pandas.io.packers import (
unpack as msgpack_unpack,
decode as pd_msgpack_object_hook,
encode as pd_msgpack_default,
)
import pandas.msgpack as msgpack_module
from toolz import identity
from blaze.compatibility import pickle as pickle_module, unicode, PY2
from blaze.interactive import coerce_scalar
from .json_dumps import json_dumps
from .json_dumps_trusted import json_dumps_trusted
from .object_hook import object_hook
from .object_hook_trusted import object_hook_trusted
class SerializationFormat(object):
"""A serialization format for the blaze server and blaze client.
Parameters
----------
name : str
The name of the format. This is used on the mediatype to select
the proper format.
loads : callable[bytes -> any]
The function that loads python objects out of the serialized data.
dumps : callable[any -> bytes]
The function that serializes python objects to some other format.
data_loads : callable[any -> any], optional
data_dumps : callable[any -> any], optional
Specialized functions for loading and writing only the 'data' field of
blaze server responses. This allows us to define a more efficient
serialization format for moving large amounts of data while still
having a rich representation for the rest of the metadata.
materialize : callable[(any, DataShape, **kwargs) -> any], optional
The function used to materialize the result of compute into a form
suitable for serialization.
"""
def __init__(self,
name,
loads,
dumps,
data_loads=None,
data_dumps=None,
materialize=None):
self.name = name
self.loads = loads
self.dumps = dumps
self.data_loads = identity if data_loads is None else data_loads
self.data_dumps = identity if data_dumps is None else data_dumps
self.materialize = (
default_materialize
if materialize is None else
materialize
)
def __repr__(self):
return '<%s: %r>' % (type(self).__name__, self.name)
__str__ = __repr__
def default_materialize(data, dshape, odo_kwargs):
if iscollection(dshape):
return odo(data, list, **odo_kwargs)
if isscalar(dshape):
return coerce_scalar(data, str(dshape), odo_kwargs)
return data
def _coerce_str(bytes_or_str):
if isinstance(bytes_or_str, unicode):
return bytes_or_str
return bytes_or_str.decode('utf-8')
json = SerializationFormat(
'json',
lambda data: json_module.loads(_coerce_str(data), object_hook=object_hook),
partial(json_module.dumps, default=json_dumps),
)
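# Illustrative round trip with the 'json' format defined above; the payload
# dict is a made-up example, not part of the original module:
#
#     payload = json.dumps({'answer': 42})          # JSON text
#     assert json.loads(payload) == {'answer': 42}  # parsed back via object_hook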
json_trusted = SerializationFormat(
'json_trusted',
lambda data: json_module.loads(_coerce_str(data), object_hook=object_hook_trusted),
partial(json_module.dumps, default=json_dumps_trusted)
)
pickle = SerializationFormat(
'pickle',
pickle_module.loads,
partial(pickle_module.dumps, protocol=pickle_module.HIGHEST_PROTOCOL),
)
msgpack = SerializationFormat(
'msgpack',
partial(msgpack_module.unpackb, encoding='utf-8', object_hook=object_hook),
partial(msgpack_module.packb, default=json_dumps),
)
msgpack_trusted = SerializationFormat(
'msgpack_trusted',
partial(msgpack_module.unpackb, encoding='utf-8', object_hook=object_hook_trusted),
partial(msgpack_module.packb, default=json_dumps_trusted),
)
try:
import blosc
del blosc
compress = 'blosc'
except ImportError:
compress = None
def _final_base(values):
base = values.base
while True:
try:
base = base.base
except AttributeError:
return base
def fastmsgpack_object_hook(ob):
typ = ob.get('typ')
if typ is None:
return ob
if typ == 'nat':
return pd.NaT
if typ == 'block_manager':
df = pd_msgpack_object_hook(ob)
for block in df._data.blocks:
values = block.values
if not values.flags.writeable:
if sys.getrefcount(_final_base(values)) == 2:
# Compatibility shim until this is fixed in pandas
# See: https://github.com/pydata/pandas/pull/12359
# Mark that we own the memory for this block.
# We know that we own it because the only 2 references
# for the base object are the values of this block and
# TOS because we are passing this object to a function.
values.flags.writeable = True
else:
# If we cannot 'safely' make the bytes object mutable
# then just copy the data.
block.values = values.copy()
return df
return pd_msgpack_object_hook(ob)
if PY2:
def _l1(bs):
if isinstance(bs, unicode):
return bs.encode('latin1')
return bs
def _str(bs):
if isinstance(bs, str):
return bs.decode('utf-8')
return bs
else:
def _l1(bs):
return bs
def _str(bs):
return bs
def fastmsgpack_data_loads(data):
raw = list(msgpack_unpack(
BytesIO(_l1(data)),
object_hook=fastmsgpack_object_hook,
))
# raw will always be a list, which is most likely a list containing
# a single dataframe or series
if len(raw) == 1:
# we only serialized one structure, just return it
return raw[0]
return raw
def fastmsgpack_loads(data, object_hook=object_hook):
raw = list(msgpack_unpack(
BytesIO(_l1(data)),
object_hook=object_hook,
))
# raw will always be a list, which is most likely a list containing
# a single dataframe or series
if len(raw) == 1:
# we only serialized one structure, just return it
return raw[0]
return raw
def fastmsgpack_default(ob):
# Compatibility shim until this is fixed in pandas
# See: https://github.com/pydata/pandas/pull/12307
if ob is pd.NaT:
return {'typ': 'nat'}
return pd_msgpack_default(ob)
def fastmsgpack_data_dumps(data):
return {
'__!bytes': pd.to_msgpack(
None,
data,
compress=compress,
default=fastmsgpack_default,
encoding='utf-8',
use_bin_type=True,
),
}
def fastmsgpack_dumps(data, default=json_dumps):
return pd.to_msgpack(
None,
data,
compress=compress,
default=default,
encoding='utf-8',
use_bin_type=True,
)
def fastmsgpack_materialize(data, dshape, odo_kwargs):
if istabular(dshape):
return odo(data, pd.DataFrame, **odo_kwargs)
if iscollection(dshape):
return odo(data, pd.Series, **odo_kwargs)
if isscalar(dshape):
return coerce_scalar(data, str(dshape), odo_kwargs)
return data
fastmsgpack = SerializationFormat(
'fastmsgpack',
loads=fastmsgpack_loads,
dumps=fastmsgpack_dumps,
data_loads=fastmsgpack_data_loads,
data_dumps=fastmsgpack_data_dumps,
materialize=fastmsgpack_materialize,
)
fastmsgpack_trusted = SerializationFormat(
'fastmsgpack_trusted',
loads=partial(fastmsgpack_loads, object_hook=object_hook_trusted),
dumps=partial(fastmsgpack_dumps, default=json_dumps_trusted),
data_loads=fastmsgpack_data_loads,
data_dumps=fastmsgpack_data_dumps,
materialize=fastmsgpack_materialize,
)
all_formats = frozenset(g for g in globals().values()
if isinstance(g, SerializationFormat))
trusted_formats = frozenset([json_trusted,
msgpack_trusted,
fastmsgpack_trusted])
__all__ = ['SerializationFormat', 'all_formats', 'trusted_formats'] + list(f.name for f in all_formats)
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/server/serialization/serialization.py",
"copies": "3",
"size": "8201",
"license": "bsd-3-clause",
"hash": -4097510976258776600,
"line_mean": 28.7137681159,
"line_max": 103,
"alpha_frac": 0.6323619071,
"autogenerated": false,
"ratio": 3.8885727833096255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6020934690409625,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
from operator import itemgetter
import operator
from toolz import compose, identity
from collections import Iterator
from datashape import Record, Tuple
from blaze.expr import *
from blaze.expr import count as Count
from . import core, python
from .python import (compute, rrowfunc, rowfunc, ElemWise, pair_assemble,
reduce_by_funcs, binops, like_regex_predicate)
from ..compatibility import builtins, unicode
from ..expr import reductions
from ..dispatch import dispatch
from ..data.utils import listpack
from .core import compute, compute_up
from toolz.curried import get
__all__ = ['RDD', 'pyspark', 'SparkContext']
class Dummy(object):
sum = max = min = count = distinct = mean = variance = stdev = None
try:
from pyspark import SparkContext
import pyspark
from pyspark.rdd import RDD
except ImportError:
SparkContext = Dummy
pyspark = Dummy()
pyspark.rdd = Dummy()
RDD = Dummy
# PySpark adds a SIGCHLD signal handler, but that breaks other packages, so we
# remove it
# See https://issues.apache.org/jira/browse/SPARK-1394
try:
import signal
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
except:
pass
@dispatch(ElemWise, RDD)
def compute_up(t, rdd, **kwargs):
func = rowfunc(t)
return rdd.map(func)
@dispatch(Selection, RDD)
def compute_up(t, rdd, **kwargs):
predicate = rrowfunc(t.predicate, t._child)
return rdd.filter(predicate)
rdd_reductions = {
reductions.sum: RDD.sum,
reductions.min: RDD.min,
reductions.max: RDD.max,
reductions.count: RDD.count,
reductions.mean: RDD.mean,
reductions.var: RDD.variance,
reductions.std: RDD.stdev,
reductions.nunique: compose(RDD.count, RDD.distinct)}
@dispatch(tuple(rdd_reductions), RDD)
def compute_up(t, rdd, **kwargs):
return rdd_reductions[type(t)](rdd)
def istruthy(x):
return not not x
@dispatch(reductions.any, RDD)
def compute_up(t, rdd, **kwargs):
return istruthy(rdd.filter(identity).take(1))
@dispatch(reductions.all, RDD)
def compute_up(t, rdd, **kwargs):
return not rdd.filter(lambda x: not x).take(1)
@dispatch(Head, RDD)
def compute_up(t, rdd, **kwargs):
return rdd.take(t.n)
@dispatch(Sort, RDD)
def compute_up(t, rdd, **kwargs):
if isinstance(t.key, (str, unicode, tuple, list)):
key = rowfunc(t._child[t.key])
else:
key = rrowfunc(t.key, t._child)
return (rdd.keyBy(key)
.sortByKey(ascending=t.ascending)
.map(lambda x: x[1]))
@dispatch(Distinct, RDD)
def compute_up(t, rdd, **kwargs):
return rdd.distinct()
@dispatch(Join, RDD, RDD)
def compute_up(t, lhs, rhs, **kwargs):
on_left = rowfunc(t.lhs[t.on_left])
on_right = rowfunc(t.rhs[t.on_right])
lhs = lhs.keyBy(on_left)
rhs = rhs.keyBy(on_right)
if t.how == 'inner':
rdd = lhs.join(rhs)
elif t.how == 'left':
rdd = lhs.leftOuterJoin(rhs)
elif t.how == 'right':
rdd = lhs.rightOuterJoin(rhs)
elif t.how == 'outer':
# https://issues.apache.org/jira/browse/SPARK-546
raise NotImplementedError("Spark does not yet support full outer join")
assemble = pair_assemble(t)
return rdd.map(lambda x: assemble(x[1]))
python_reductions = {
reductions.sum: builtins.sum,
reductions.count: builtins.len,
reductions.max: builtins.max,
reductions.min: builtins.min,
reductions.any: builtins.any,
reductions.all: builtins.all,
reductions.mean: python._mean,
reductions.var: python._var,
reductions.std: python._std,
reductions.nunique: lambda x: len(set(x))}
@dispatch(By, RDD)
def compute_up(t, rdd, **kwargs):
if ((isinstance(t.apply, Reduction) and type(t.apply) in binops) or
(isinstance(t.apply, Summary) and builtins.all(type(val) in binops
for val in t.apply.values))):
grouper, binop, combiner, initial = reduce_by_funcs(t)
if isscalar(t.grouper.dshape.measure):
keyfunc = lambda x: (x,)
else:
keyfunc = identity
if isscalar(t.apply.dshape.measure):
valfunc = lambda x: (x,)
else:
valfunc = identity
unpack = lambda kv: keyfunc(kv[0]) + valfunc(kv[1])
create = lambda v: binop(initial, v)
return (rdd.keyBy(grouper)
.combineByKey(create, binop, combiner)
.map(unpack))
else:
raise NotImplementedError("By only implemented for common reductions."
"\nGot %s" % type(t.apply))
@dispatch((Label, ReLabel), RDD)
def compute_up(t, rdd, **kwargs):
return rdd
@dispatch(Summary, RDD)
def compute_up(t, rdd, **kwargs):
rdd = rdd.cache()
return tuple(compute(value, {t._child: rdd}) for value in t.values)
@dispatch(Like, RDD)
def compute_up(t, rdd, **kwargs):
predicate = like_regex_predicate(t)
return rdd.filter(predicate)
@dispatch(RDD, RDD)
def into(a, b):
return b
@dispatch(SparkContext, (list, tuple, Iterator))
def into(sc, seq):
return sc.parallelize(seq)
@dispatch(RDD, (list, tuple))
def into(rdd, seq):
return into(rdd.context, seq)
@dispatch(list, RDD)
def into(seq, rdd):
return rdd.collect()
@dispatch(Union, RDD, tuple)
def compute_up(t, _, children):
return reduce(RDD.union, children)
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/spark.py",
"copies": "1",
"size": "5633",
"license": "bsd-3-clause",
"hash": 1807276694759420400,
"line_mean": 24.9585253456,
"line_max": 79,
"alpha_frac": 0.6309249068,
"autogenerated": false,
"ratio": 3.3690191387559807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4499944045555981,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
from operator import itemgetter
import operator
from toolz import compose, identity
from toolz.curried import get
from blaze.expr.table import *
from blaze.expr.table import count as Count
from . import core, python
from .python import compute, rowfunc, RowWise
from ..compatibility import builtins
from ..expr import table
from ..dispatch import dispatch
try:
from itertools import compress, chain
import pyspark
from pyspark.rdd import RDD
except ImportError:
    # Create a dummy RDD when pyspark is unavailable (e.g. Python 2.6)
class Dummy(object):
sum = max = min = count = distinct = mean = variance = stdev = None
pyspark = Dummy()
pyspark.rdd = Dummy()
RDD = Dummy
# PySpark adds a SIGCHLD signal handler, but that breaks other packages, so we
# remove it
# See https://issues.apache.org/jira/browse/SPARK-1394
try:
import signal
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
except:
pass
@dispatch(RowWise, RDD)
def compute(t, rdd):
rdd = compute(t.parent, rdd)
func = rowfunc(t)
return rdd.map(func)
@dispatch(TableSymbol, RDD)
def compute(t, s):
return s
@dispatch(Selection, RDD)
def compute(t, rdd):
rdd = compute(t.parent, rdd)
predicate = rowfunc(t.predicate)
return rdd.filter(predicate)
rdd_reductions = {
table.sum: RDD.sum,
table.min: RDD.min,
table.max: RDD.max,
table.count: RDD.count,
table.mean: RDD.mean,
table.var: RDD.variance,
table.std: RDD.stdev,
table.nunique: compose(RDD.count, RDD.distinct)}
@dispatch(tuple(rdd_reductions), RDD)
def compute(t, rdd):
reduction = rdd_reductions[type(t)]
return reduction(compute(t.parent, rdd))
def istruthy(x):
return not not x
@dispatch(table.any, RDD)
def compute(t, rdd):
rdd = compute(t.parent, rdd)
return istruthy(rdd.filter(identity).take(1))
@dispatch(table.all, RDD)
def compute(t, rdd):
rdd = compute(t.parent, rdd)
return not rdd.filter(lambda x: not x).take(1)
@dispatch(Head, RDD)
def compute(t, rdd):
rdd = compute(t.parent, rdd)
return rdd.take(t.n)
@dispatch(Sort, RDD)
def compute(t, rdd):
rdd = compute(t.parent, rdd)
func = rowfunc(t[t.column])
return (rdd.keyBy(func)
.sortByKey(ascending=t.ascending)
.map(lambda x: x[1]))
@dispatch(Distinct, RDD)
def compute(t, rdd):
rdd = compute(t.parent, rdd)
return rdd.distinct()
@dispatch(Join, RDD, RDD)
def compute(t, lhs, rhs):
lhs = compute(t.lhs, lhs)
rhs = compute(t.rhs, rhs)
col_idx_lhs = t.lhs.columns.index(t.on_left)
col_idx_rhs = t.rhs.columns.index(t.on_right)
lhs = lhs.keyBy(lambda x: x[col_idx_lhs])
rhs = rhs.keyBy(lambda x: x[col_idx_rhs])
# Calculate the indices we want in the joined table
columns = t.lhs.columns + t.rhs.columns
repeated_index = len(columns) - columns[::-1].index(t.on_right) - 1
wanted = list(range(len(columns)))
wanted.pop(repeated_index)
getter = get(wanted)
reassemble = lambda x: getter(x[1][0] + x[1][1])
return lhs.join(rhs).map(reassemble)
python_reductions = {
table.sum: builtins.sum,
table.count: builtins.len,
table.max: builtins.max,
table.min: builtins.min,
table.any: builtins.any,
table.all: builtins.all,
table.mean: python._mean,
table.var: python._var,
table.std: python._std,
table.nunique: lambda x: len(set(x))}
@dispatch(By, RDD)
def compute(t, rdd):
rdd = compute(t.parent, rdd)
try:
reduction = python_reductions[type(t.apply)]
except KeyError:
raise NotImplementedError("By only implemented for common reductions."
"\nGot %s" % type(t.apply))
grouper = rowfunc(t.grouper)
pre = rowfunc(t.apply.parent)
groups = (rdd.map(lambda x: (grouper(x), pre(x)))
.groupByKey())
if isinstance(t.grouper, (Column, ColumnWise)):
func = lambda x: (x[0], reduction(x[1]))
else:
func = lambda x: (tuple(x[0]) + (reduction(x[1]),))
return groups.map(func)
@dispatch((Label, ReLabel), RDD)
def compute(t, rdd):
return compute(t.parent, rdd)
@dispatch(RDD, RDD)
def into(a, b):
return b
@dispatch(RDD, (list, tuple))
def into(rdd, seq):
return rdd.context.parallelize(seq)
@dispatch(list, RDD)
def into(seq, rdd):
return rdd.collect()
| {
"repo_name": "aterrel/blaze",
"path": "blaze/compute/spark.py",
"copies": "1",
"size": "4553",
"license": "bsd-3-clause",
"hash": 3738953122414901000,
"line_mean": 23.3475935829,
"line_max": 78,
"alpha_frac": 0.6329892379,
"autogenerated": false,
"ratio": 3.261461318051576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4394450555951576,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
from subprocess import Popen, check_output, PIPE
from os.path import islink, isfile
from itertools import islice
NO_EXT = (
'.py', '.pyc', '.pyo', '.h', '.a', '.c', '.txt', '.html',
'.xml', '.png', '.jpg', '.gif', '.class',
)
MAGIC = {
b'\xca\xfe\xba\xbe': 'MachO-universal',
b'\xce\xfa\xed\xfe': 'MachO-i386',
b'\xcf\xfa\xed\xfe': 'MachO-x86_64',
b'\xfe\xed\xfa\xce': 'MachO-ppc',
b'\xfe\xed\xfa\xcf': 'MachO-ppc64',
}
FILETYPE = {
1: 'MH_OBJECT',
2: 'MH_EXECUTE',
3: 'MH_FVMLIB',
4: 'MH_CORE',
5: 'MH_PRELOAD',
6: 'MH_DYLIB',
7: 'MH_DYLINKER',
8: 'MH_BUNDLE',
9: 'MH_DYLIB_STUB',
10: 'MH_DSYM',
11: 'MH_KEXT_BUNDLE',
}
def is_macho(path):
if path.endswith(NO_EXT) or islink(path) or not isfile(path):
return False
with open(path, 'rb') as fi:
head = fi.read(4)
return bool(head in MAGIC)
def is_dylib(path):
return human_filetype(path) == 'DYLIB'
def human_filetype(path):
lines = check_output(['otool', '-h', path]).decode('utf-8').splitlines()
assert lines[0].startswith(path), path
for line in lines:
if line.strip().startswith('0x'):
header = line.split()
filetype = int(header[4])
return FILETYPE[filetype][3:]
def is_dylib_info(lines):
dylib_info = ('LC_ID_DYLIB', 'LC_LOAD_DYLIB')
if len(lines) > 1 and lines[1].split()[1] in dylib_info:
return True
return False
def is_id_dylib(lines):
if len(lines) > 1 and lines[1].split()[1] == 'LC_ID_DYLIB':
return True
return False
def is_load_dylib(lines):
if len(lines) > 1 and lines[1].split()[1] == 'LC_LOAD_DYLIB':
return True
return False
def is_rpath(lines):
if len(lines) > 1 and lines[1].split()[1] == 'LC_RPATH':
return True
return False
def _get_load_commands(lines):
"""yields each load command from the output of otool -l"""
a = 1 # first line is the filename.
for ln, line in enumerate(lines):
if line.startswith("Load command"):
if a < ln:
yield lines[a:ln]
a = ln
yield lines[a:]
def _get_matching_load_commands(lines, cb_filter):
"""Workhorse function for otool
Does the work of filtering load commands and making a list
of dicts. The logic for splitting the free-form lines into
    keys and values is entirely encoded here. Values that can
be converted to ints are converted to ints.
"""
result = []
for lcmds in _get_load_commands(lines):
if cb_filter(lcmds):
lcdict = {}
for line in islice(lcmds, 1, len(lcmds)):
listy = line.split()
# This could be prettier, but what we need it to handle
# is fairly simple so let's just hardcode it for speed.
if len(listy) == 2:
key, value = listy
elif listy[0] == 'name' or listy[0] == 'path':
# Create an entry for 'name offset' if there is one
# as that can be useful if we need to know if there
# is space to patch it for relocation purposes.
if listy[2] == '(offset':
key = listy[0] + ' offset'
value = int(listy[3][:-1])
lcdict[key] = value
key, value = listy[0:2]
elif listy[0] == 'time':
key = ' '.join(listy[0:3])
value = ' '.join(listy[3:])
elif listy[0] in ('current', 'compatibility'):
key = ' '.join(listy[0:2])
value = listy[2]
try:
value = int(value)
except:
pass
lcdict[key] = value
result.append(lcdict)
return result
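# Illustrative example of the splitting above (the load-command line is
# hypothetical otool output, not captured from a real binary):
#
#     'name /usr/lib/libz.1.dylib (offset 24)'
#         -> {'name': '/usr/lib/libz.1.dylib', 'name offset': 24}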
def otool(path, cb_filter=is_dylib_info):
"""A wrapper around otool -l
Parse the output of the otool -l 'load commands', filtered by
    cb_filter, returning a list of dictionaries for the records.
cb_filter receives the whole load command record, including the
first line, the 'Load Command N' one. All the records have been
pre-stripped of white space.
    The output of otool -l is entirely freeform; there is no formal
    delineation between key and value, so the splitting is hard-coded.
    Regexes are deliberately avoided here for speed.
Any key values that can be converted to integers are converted
to integers, the rest are strings.
"""
lines = check_output(['otool', '-l', path]).decode('utf-8').splitlines()
return _get_matching_load_commands(lines, cb_filter)
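# Illustrative call; the path and returned values are hypothetical and only
# indicate the shape of the result:
#
#     otool('/usr/lib/libz.1.dylib', cb_filter=is_load_dylib)
#     # -> [{'cmd': 'LC_LOAD_DYLIB', 'cmdsize': 56,
#     #      'name': '/usr/lib/libSystem.B.dylib', 'name offset': 24, ...}, ...]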
def get_dylibs(path):
"""Return a list of the loaded dylib pathnames"""
dylib_loads = otool(path, is_load_dylib)
return [dylib_load['name'] for dylib_load in dylib_loads]
def get_id(path):
"""Returns the id name of the Mach-O file `path` or an empty string"""
dylib_loads = otool(path, is_id_dylib)
try:
return [dylib_load['name'] for dylib_load in dylib_loads][0]
except:
return ''
def get_rpaths(path):
"""Return a list of the dylib rpaths"""
rpaths = otool(path, is_rpath)
return [rpath['path'] for rpath in rpaths]
def add_rpath(path, rpath, verbose = False):
"""Add an `rpath` to the Mach-O file at `path`"""
args = ['install_name_tool', '-add_rpath', rpath, path]
if verbose:
print(' '.join(args))
p = Popen(args, stderr=PIPE)
stdout, stderr = p.communicate()
stderr = stderr.decode('utf-8')
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s\n" % path)
return
elif "would duplicate path, file already has LC_RPATH for:" in stderr:
print("Skipping -add_rpath, file already has LC_RPATH set")
return
else:
print(stderr, file=sys.stderr)
if p.returncode:
raise RuntimeError("install_name_tool failed with exit status %d"
% p.returncode)
def delete_rpath(path, rpath, verbose = False):
"""Delete an `rpath` from the Mach-O file at `path`"""
args = ['install_name_tool', '-delete_rpath', rpath, path]
if verbose:
print(' '.join(args))
p = Popen(args, stderr=PIPE)
stdout, stderr = p.communicate()
stderr = stderr.decode('utf-8')
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s\n" % path)
return
elif "no LC_RPATH load command with path:" in stderr:
print("Skipping -delete_rpath, file doesn't contain that LC_RPATH")
return
else:
print(stderr, file=sys.stderr)
if p.returncode:
raise RuntimeError("install_name_tool failed with exit status %d"
% p.returncode)
def install_name_change(path, cb_func, verbose = False):
"""Change dynamic shared library load name or id name of Mach-O Binary `path`.
`cb_func` is called for each shared library load command. The dictionary of
the load command is passed in and the callback returns the new name or None
if the name should be unchanged.
When dealing with id load commands, `install_name_tool -id` is used.
When dealing with dylib load commands `install_name_tool -change` is used.
"""
dylibs = otool(path)
changes = []
for index, dylib in enumerate(dylibs):
new_name = cb_func(path, dylib)
if new_name:
changes.append((index, new_name))
ret = True
for index, new_name in changes:
args = ['install_name_tool']
if dylibs[index]['cmd'] == 'LC_ID_DYLIB':
args.extend(('-id', new_name, path))
else:
args.extend(('-change', dylibs[index]['name'], new_name, path))
if verbose:
print(' '.join(args))
p = Popen(args, stderr=PIPE)
stdout, stderr = p.communicate()
stderr = stderr.decode('utf-8')
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s" % path)
ret = False
continue
else:
print(stderr, file=sys.stderr)
if p.returncode:
raise RuntimeError("install_name_tool failed with exit status %d"
% p.returncode)
return ret
if __name__ == '__main__':
if sys.platform == 'darwin':
for path in '/bin/ls', '/etc/locate.rc':
print(path, is_macho(path))
| {
"repo_name": "dan-blanchard/conda-build",
"path": "conda_build/macho.py",
"copies": "2",
"size": "8684",
"license": "bsd-3-clause",
"hash": -4230571745856221000,
"line_mean": 31.7698113208,
"line_max": 82,
"alpha_frac": 0.5826807923,
"autogenerated": false,
"ratio": 3.589913187267466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015012466283538656,
"num_lines": 265
} |
from __future__ import absolute_import, division, print_function
import sys
from sunpy.extern.six.moves import range
__all__ = ['TTYProgressBar']
class TTYProgressBar(object):
"""
A simple progress bar to visualize progress on a TTY (teletypewriter).
It needs to support '\b' to delete characters.
The ProgressBar interface is start, finish, draw and poke.
"""
SYMBOL = '='
LEFT_BORDER = '['
RIGHT_BORDER = ']'
def __init__(self, n, current=0, width=40, output=sys.stdout):
"""
Parameters
----------
n : int
Total number of items until completion
current : int
Current state of completion
width : int
Width of progress bar.
output : file
Teletypewriter to print on.
"""
self.n = n
self.current = current
self.width = width
# Future division, so it is float.
self.step = self.n / self.width
self.output = output
def start(self):
"""
Draw empty bar to output.
"""
self.output.write(
self.LEFT_BORDER + " " * (len(self.SYMBOL) * self.width) +
self.RIGHT_BORDER
)
self.output.flush()
self.output.write("\b" * (self.width+len(self.RIGHT_BORDER)))
def finish(self):
"""
        Finish the bar; the ProgressBar cannot be used after this
        method has been called.
"""
print()
def _draw_one(self):
"""
Advance progress bar by one.
"""
self.output.write(self.SYMBOL)
def draw(self):
"""
        Draw the current state of the progress bar onto an empty bar.
"""
cur = self.current
self.current = 0
for _ in range(cur):
self.poke()
def poke(self, n=1):
"""
Increase finished items by n. May advance the progress bar by one
or more fields.
"""
if self.current > self.n:
raise ValueError("ProgressBar overflowed.")
diff = int((self.current + n) / self.step) - int(self.current / self.step)
for _ in range(diff):
self._draw_one()
self.current += n
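# Minimal usage sketch (not part of the original module); the total of 100
# items is an arbitrary choice for illustration.
if __name__ == '__main__':
    bar = TTYProgressBar(100)
    bar.start()
    for _ in range(100):
        bar.poke()
    bar.finish()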
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/util/progressbar.py",
"copies": "1",
"size": "2234",
"license": "bsd-2-clause",
"hash": 3024592627663230500,
"line_mean": 25.9156626506,
"line_max": 82,
"alpha_frac": 0.5384959714,
"autogenerated": false,
"ratio": 4.160148975791434,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5198644947191434,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import gzip
from unittest import TestCase
from datashape import dshape
from blaze.data import JSON, Stack
from blaze.utils import filetexts
from blaze.compatibility import xfail
import json
is_py2_win = sys.platform == 'win32' and sys.version_info < (3, 0)
data = {'a.json': {u'name': u'Alice', u'amount': 100},
'b.json': {u'name': u'Bob', u'amount': 200},
'c.json': {u'name': u'Charlie', u'amount': 50}}
tuples = (('Alice', 100),
('Bob', 200),
('Charlie', 50))
texts = dict((fn, json.dumps(val)) for fn, val in data.items())
schema = '{name: string, amount: int}'
class Test_Integrative(TestCase):
@xfail(is_py2_win, reason='Win32 py2.7 unicode/gzip/eol needs sorting out')
def test_gzip_json_files(self):
with filetexts(texts, open=gzip.open) as filenames:
descriptors = [JSON(fn, dshape=schema, open=gzip.open)
for fn in sorted(filenames)]
dd = Stack(descriptors)
self.assertEqual(sorted(dd), sorted(tuples))
self.assertEqual(dd.schema, dshape(schema))
| {
"repo_name": "vitan/blaze",
"path": "blaze/data/tests/test_integrative.py",
"copies": "1",
"size": "1171",
"license": "bsd-3-clause",
"hash": 8785653668128866000,
"line_mean": 29.0256410256,
"line_max": 79,
"alpha_frac": 0.633646456,
"autogenerated": false,
"ratio": 3.3267045454545454,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4460351001454545,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import logging
from logging import Formatter
from functools import wraps
import traceback
import collections
from datetime import datetime
import errno
import functools
from hashlib import md5
import os
import socket
from time import time
from warnings import warn
import importlib
from datashape import discover, pprint
import flask
from flask import Blueprint, Flask, Response
from flask_cors import cross_origin
from werkzeug.http import parse_options_header
from toolz import valmap, compose
import blaze
import blaze as bz
from blaze import compute, resource
from blaze.compatibility import ExitStack, u8
from blaze.compute import compute_up
from .serialization import json, all_formats
from ..expr import (
BoundSymbol,
Data,
Expr,
Symbol,
symbol,
utils as expr_utils,
)
__all__ = 'Server', 'to_tree', 'from_tree', 'expr_md5'
# http://www.speedguide.net/port.php?port=6363
# http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers
DEFAULT_PORT = 6363
class RC(object):
"""
Simple namespace for HTTP status codes.
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes
"""
OK = 200
CREATED = 201
BAD_REQUEST = 400
UNAUTHORIZED = 401
NOT_FOUND = 404
FORBIDDEN = 403
CONFLICT = 409
UNPROCESSABLE_ENTITY = 422
UNSUPPORTED_MEDIA_TYPE = 415
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
api = Blueprint('api', __name__)
pickle_extension_api = Blueprint('pickle_extension_api', __name__)
_no_default = object() # sentinel
def _logging(func):
@wraps(func)
def _logger(*args, **kwargs):
logger = flask.current_app.logger
try:
logger.debug("Calling %s" % func.__name__)
ret = func(*args, **kwargs)
finally:
logger.debug("Leaving %s" % func.__name__)
return ret
return _logger
def _get_option(option, options, default=_no_default):
try:
return options[option]
except KeyError:
if default is not _no_default:
return default
# Provides a more informative error message.
msg = 'The blaze api must be registered with {option}'
raise TypeError(msg.format(option=option))
def ensure_dir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Default for logging exception tracebacks is to simply use the standard
# `traceback.format_tb`
_default_log_exception_formatter = compose(''.join, traceback.format_tb)
def _register_api(app, options, first_registration=False):
"""
Register the data with the blueprint.
"""
_get_data.cache[app] = _get_option('data', options)
_get_format.cache[app] = {f.name: f for f in _get_option('formats', options)}
_get_auth.cache[app] = (_get_option('authorization', options, None) or
(lambda a: True))
allow_profiler = _get_option('allow_profiler', options, False)
profiler_output = _get_option('profiler_output', options, None)
profile_by_default = _get_option('profile_by_default', options, False)
if not allow_profiler and (profiler_output or profile_by_default):
msg = "cannot set %s%s%s when 'allow_profiler' is False"
raise ValueError(msg % ('profiler_output' if profiler_output else '',
' or ' if profiler_output and profile_by_default else '',
'profile_by_default' if profile_by_default else ''))
if allow_profiler:
if profiler_output is None:
profiler_output = 'profiler_output'
if profiler_output != ':response':
ensure_dir(profiler_output)
_get_profiler_info.cache[app] = (allow_profiler,
profiler_output,
profile_by_default)
# Allowing users to dynamically add datasets to the Blaze server can be
# dangerous, so we only expose the method if specifically requested
allow_add = _get_option('allow_add', options, False)
if allow_add:
app.add_url_rule('/add', 'addserver', addserver,
methods=['POST', 'HEAD', 'OPTIONS'])
# Call the original register function.
Blueprint.register(api, app, options, first_registration)
api.register = _register_api
def per_app_accesor(name):
def _get():
return _get.cache[flask.current_app]
_get.cache = {}
_get.__name__ = '_get' + name
return _get
def _get_format(name):
return _get_format.cache[flask.current_app][name]
_get_format.cache = {}
_get_data = per_app_accesor('data')
_get_auth = per_app_accesor('auth')
_get_profiler_info = per_app_accesor('profiler_info')
def expr_md5(expr):
"""Returns the md5 hash of the str of the expression.
Parameters
----------
expr : Expr
The expression to hash.
Returns
-------
hexdigest : str
The hexdigest of the md5 of the str of ``expr``.
"""
exprstr = str(expr)
if not isinstance(exprstr, bytes):
exprstr = exprstr.encode('utf-8')
return md5(exprstr).hexdigest()
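# A minimal usage sketch (illustrative, not used by the server): structurally
# different expressions hash to different digests, which is what lets the
# client locate profiler output by hash. The symbol below is hypothetical.
def _expr_md5_example():  # pragma: no cover - illustrative sketch
    t = symbol('t', 'var * {x: int32, y: int32}')
    assert expr_md5(t.x.sum()) != expr_md5(t.y.sum())
    return expr_md5(t.x.sum())  # 32-character hexadecimal digest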
def _prof_path(profiler_output, expr):
"""Get the path to write the data for a profile run of ``expr``.
Parameters
----------
profiler_output : str
        The directory to write into.
expr : Expr
The expression that was run.
Returns
-------
prof_path : str
The filepath to write the new profiler data.
Notes
-----
This function ensures that the dirname of the returned path exists.
"""
dir_ = os.path.join(profiler_output,
expr_md5(expr)) # Use md5 so the client knows where to look.
ensure_dir(dir_)
return os.path.join(dir_,
str(int(datetime.utcnow().timestamp())))
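# Illustrative sketch of the layout produced above, assuming a directory named
# 'profiler_output': profiler_output/<expr_md5(expr)>/<integer UTC timestamp>.
# Calling this creates the intermediate directory as a side effect.
def _prof_path_example():  # pragma: no cover - illustrative sketch
    t = symbol('t', 'var * {x: int32}')
    path = _prof_path('profiler_output', t.x.sum())
    assert os.path.dirname(path).endswith(expr_md5(t.x.sum()))
    return path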
def authorization(f):
@functools.wraps(f)
def authorized(*args, **kwargs):
if not _get_auth()(flask.request.authorization):
return Response('bad auth token',
RC.UNAUTHORIZED,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
return f(*args, **kwargs)
return authorized
def check_request(f):
@functools.wraps(f)
def check():
raw_content_type = flask.request.headers['content-type']
content_type, options = parse_options_header(raw_content_type)
if content_type not in accepted_mimetypes:
return ('Unsupported serialization format %s' % content_type,
RC.UNSUPPORTED_MEDIA_TYPE)
try:
serial = _get_format(accepted_mimetypes[content_type])
except KeyError:
return ("Unsupported serialization format '%s'" % content_type,
RC.UNSUPPORTED_MEDIA_TYPE)
try:
payload = serial.loads(flask.request.data)
except ValueError:
return ("Bad data. Got %s " % flask.request.data, RC.BAD_REQUEST)
return f(payload, serial)
return check
class FlaskWithExceptionFormatting(Flask):
""" Add a `log_exception_formatter` instance attribute to the Flask
application object, to allow it to store a handler function.
"""
log_exception_formatter = None
def __init__(self, *args, **kwargs):
self.log_exception_formatter = kwargs.pop('log_exception_formatter',
_default_log_exception_formatter)
super(FlaskWithExceptionFormatting, self).__init__(*args, **kwargs)
class Server(object):
""" Blaze Data Server
Host local data through a web API
Parameters
----------
data : dict, optional
A dictionary mapping dataset name to any data format that blaze
understands.
formats : iterable, optional
An iterable of supported serialization formats. By default, the
server will support JSON.
A serialization format is an object that supports:
name, loads, and dumps.
authorization : callable, optional
A callable to be used to check the auth header from the client.
This callable should accept a single argument that will either be
None indicating that no header was passed, or an object
containing a username and password attribute. By default, all requests
are allowed.
allow_profiler : bool, optional
Allow payloads to specify `"profile": true` which will run the
computation under cProfile.
profiler_output : str, optional
The directory to write pstats files after profile runs.
The files will be written in a structure like:
{profiler_output}/{hash(expr)}/{timestamp}
This defaults to a relative path of `profiler_output`.
This requires `allow_profiler=True`.
If this is the string ':response' then writing to the local filesystem
is disabled. Only requests that specify `profiler_output=':response'`
will be served. All others will return a 403 (Forbidden).
profile_by_default : bool, optional
Run the profiler on any computation that does not explicitly set
"profile": false.
This requires `allow_profiler=True`.
allow_add : bool, optional
Expose an `/add` endpoint to allow datasets to be dynamically added to
the server. Since this increases the risk of security holes, it defaults
to `False`.
logfile : str or file-like object, optional
A filename or open file-like stream to which to send log output. Defaults
to `sys.stdout`.
loglevel : str, optional
A string logging level (e.g. 'WARNING', 'INFO') to set how verbose log
output should be.
log_exception_formatter : callable, optional
A callable to be used to format an exception traceback for logging. It
should take a traceback argument, and return the string to be logged.
This defaults to the standard library `traceback.format_tb`
Examples
--------
>>> from pandas import DataFrame
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Alice', 300],
... [4, 'Dennis', 400],
... [5, 'Bob', -500]],
... columns=['id', 'name', 'amount'])
>>> server = Server({'accounts': df})
>>> server.run() # doctest: +SKIP
"""
def __init__(self,
data=None,
formats=None,
authorization=None,
allow_profiler=False,
profiler_output=None,
profile_by_default=False,
allow_add=False,
logfile=sys.stdout,
loglevel='WARNING',
log_exception_formatter=_default_log_exception_formatter):
if isinstance(data, collections.Mapping):
data = valmap(lambda v: v.data if isinstance(v, BoundSymbol) else v,
data)
elif isinstance(data, BoundSymbol):
data = data._resources()
app = self.app = FlaskWithExceptionFormatting('blaze.server.server',
log_exception_formatter=log_exception_formatter)
if data is None:
data = {}
app.register_blueprint(api,
data=data,
formats=formats if formats is not None else (json,),
authorization=authorization,
allow_profiler=allow_profiler,
profiler_output=profiler_output,
profile_by_default=profile_by_default,
allow_add=allow_add)
self.data = data
if logfile:
if isinstance(logfile, (str, bytes)):
handler = logging.FileHandler(logfile)
else:
handler = logging.StreamHandler(logfile)
handler.setFormatter(Formatter('[%(asctime)s %(levelname)s] %(message)s '
'[in %(pathname)s:%(lineno)d]'))
handler.setLevel(getattr(logging, loglevel))
app.logger.addHandler(handler)
def run(self, port=DEFAULT_PORT, retry=False, **kwargs):
"""Run the server.
Parameters
----------
port : int, optional
The port to bind to.
retry : bool, optional
If the port is busy, should we retry with the next available port?
**kwargs
Forwarded to the underlying flask app's ``run`` method.
Notes
-----
This function blocks forever when successful.
"""
self.port = port
try:
# Blocks until the server is shut down.
self.app.logger.debug('Starting server...')
self.app.run(port=port, **kwargs)
self.app.logger.debug('Stopping server...')
except socket.error:
if not retry:
raise
warn("Oops, couldn't connect on port %d. Is it busy?" % port)
# Attempt to start the server on a new port.
self.run(port=port + 1, retry=retry, **kwargs)
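# Illustrative sketch (not part of the server itself): wiring up a Server with
# a basic authorization callable, as described in the class docstring. The
# credentials and dataset below are hypothetical.
def _example_secured_server():  # pragma: no cover - illustrative sketch
    import pandas as pd
    df = pd.DataFrame({'name': ['Alice', 'Bob'], 'amount': [100, 200]})
    def check_auth(auth):
        # `auth` is None when the client sent no Authorization header.
        return (auth is not None and
                auth.username == 'user' and
                auth.password == 'secret')
    return Server(data={'accounts': df}, authorization=check_auth)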
@api.route('/datashape', methods=['GET'])
@cross_origin(origins='*', methods=['GET'])
@authorization
@_logging
def shape():
return pprint(discover(_get_data()), width=0)
def to_tree(expr, names=None):
""" Represent Blaze expression with core data structures
Transform a Blaze expression into a form using only strings, dicts, lists
and base types (int, float, datetime, ....) This form can be useful for
serialization.
Parameters
----------
expr : Expr
A Blaze expression
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> to_tree(t) # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> to_tree(t.x.sum()) # doctest: +SKIP
{'op': 'sum',
'args': [{'op': 'Column',
'args': [{'op': 'Symbol'
'args': ['t',
'var * { x : int32, y : int32 }',
False]}
'x']}]}
    Simplify the expression using an explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Column', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
from_tree
"""
if isinstance(expr, slice):
# NOTE: This case must come first, since `slice` objects are not
        # hashable, so a dict lookup inside `names` will raise an exception.
return {u'op': u'slice',
u'args': [to_tree(arg, names=names) for arg in
[expr.start, expr.stop, expr.step]]}
if names and expr in names:
return names[expr]
if isinstance(expr, tuple):
return [to_tree(arg, names=names) for arg in expr]
if isinstance(expr, expr_utils._slice):
return to_tree(expr.as_slice(), names=names)
elif isinstance(expr, Data) and isinstance(expr.data, bz.Client):
name = u8(expr._name) if expr._name is not None else None
return to_tree(symbol(name, expr.dshape), names)
elif isinstance(expr, Expr):
return {u'op': u8(type(expr).__name__),
u'args': [to_tree(arg, names) for arg in expr._args]}
elif isinstance(expr, str):
return u8(expr)
else:
return expr
def expression_from_name(name):
"""
>>> expression_from_name('By')
<class 'blaze.expr.split_apply_combine.By'>
>>> expression_from_name('And')
<class 'blaze.expr.arithmetic.And'>
"""
import blaze
if hasattr(blaze, name):
return getattr(blaze, name)
if hasattr(blaze.expr, name):
return getattr(blaze.expr, name)
for signature, func in compute_up.funcs.items():
try:
if signature[0].__name__ == name:
return signature[0]
except TypeError:
pass
raise ValueError('%s not found in compute_up' % name)
def from_tree(expr, namespace=None):
""" Convert core data structures to Blaze expression
Core data structure representations created by ``to_tree`` are converted
back into Blaze expressions.
Parameters
----------
expr : dict
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> tree = to_tree(t)
>>> tree # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> from_tree(tree)
<`t` symbol; dshape='var * {x: int32, y: int32}'>
>>> tree = to_tree(t.x.sum())
>>> tree # doctest: +SKIP
{'op': 'sum',
'args': [{'op': 'Field',
'args': [{'op': 'Symbol'
'args': ['t',
'var * {x : int32, y : int32}',
False]}
'x']}]}
>>> from_tree(tree)
sum(t.x)
    Simplify the expression using an explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Field', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
to_tree
"""
if isinstance(expr, dict):
op, args = expr[u'op'], expr[u'args']
if op == u'slice':
return expr_utils._slice(*[from_tree(arg, namespace)
for arg in args])
if hasattr(blaze.expr, op):
cls = getattr(blaze.expr, op)
else:
cls = expression_from_name(op)
if cls is Symbol:
cls = symbol
children = [from_tree(arg, namespace) for arg in args]
return cls(*children)
elif isinstance(expr, (list, tuple)):
return tuple(from_tree(arg, namespace) for arg in expr)
if namespace and expr in namespace:
return namespace[expr]
else:
return expr
accepted_mimetypes = {'application/vnd.blaze+{}'.format(x.name): x.name for x
in all_formats}
@api.route('/compute', methods=['POST', 'HEAD', 'OPTIONS'])
@cross_origin(origins='*', methods=['POST', 'HEAD', 'OPTIONS'])
@authorization
@check_request
@_logging
def compserver(payload, serial):
expected_keys = {u'namespace',
u'odo_kwargs',
u'compute_kwargs',
u'expr',
u'profile',
u'profiler_output'}
if not set(payload.keys()) < expected_keys:
return ('unexpected keys in payload: %r' % sorted(set(payload.keys()) -
expected_keys),
RC.BAD_REQUEST)
app = flask.current_app
(allow_profiler,
default_profiler_output,
profile_by_default) = _get_profiler_info()
requested_profiler_output = payload.get(u'profiler_output',
default_profiler_output)
profile = payload.get(u'profile')
profiling = (allow_profiler and
(profile or (profile_by_default and requested_profiler_output)))
if profile and not allow_profiler:
return ('profiling is disabled on this server', RC.FORBIDDEN)
with ExitStack() as response_construction_context_stack:
if profiling:
from cProfile import Profile
if (default_profiler_output == ':response' and
requested_profiler_output != ':response'):
# writing to the local filesystem is disabled
return ("local filepaths are disabled on this server, only"
" ':response' is allowed for the 'profiler_output' field",
RC.FORBIDDEN)
profiler_output = requested_profiler_output
profiler = Profile()
profiler.enable()
# ensure that we stop profiling in the case of an exception
response_construction_context_stack.callback(profiler.disable)
expr = '<failed to parse expr>'
@response_construction_context_stack.callback
def log_time(start=time()):
app.logger.info('compute expr: %s\ntotal time (s): %.3f',
expr,
time() - start)
ns = payload.get(u'namespace', {})
compute_kwargs = payload.get(u'compute_kwargs') or {}
odo_kwargs = payload.get(u'odo_kwargs') or {}
dataset = _get_data()
ns[':leaf'] = symbol('leaf', discover(dataset))
expr = from_tree(payload[u'expr'], namespace=ns)
leaf = expr._leaves()[0]
formatter = getattr(flask.current_app, 'log_exception_formatter',
_default_log_exception_formatter)
try:
result = serial.materialize(compute(expr,
{leaf: dataset},
**compute_kwargs),
expr.dshape,
odo_kwargs)
except NotImplementedError as e:
# Note: `sys.exc_info()[2]` holds the current traceback, for
# Python 2 / 3 compatibility. It's important not to store a local
# reference to it.
formatted_tb = formatter(sys.exc_info()[2])
error_msg = "Computation not supported:\n%s\n%s" % (e, formatted_tb)
app.logger.error(error_msg)
return (error_msg, RC.NOT_IMPLEMENTED)
except Exception as e:
formatted_tb = formatter(sys.exc_info()[2])
error_msg = "Computation failed with message:\n%s: %s\n%s" % (type(e).__name__, e, formatted_tb)
app.logger.error(error_msg)
return (error_msg, RC.INTERNAL_SERVER_ERROR)
response = {u'datashape': pprint(expr.dshape, width=0),
u'data': serial.data_dumps(result),
u'names': expr.fields}
if profiling:
import marshal
from pstats import Stats
if profiler_output == ':response':
from pandas.compat import BytesIO
file = BytesIO()
else:
file = open(_prof_path(profiler_output, expr), 'wb')
with file:
# Use marshal to dump the stats data to the given file.
# This is taken from cProfile which unfortunately does not have
# an api that allows us to pass the file object directly, only
# a file path.
marshal.dump(Stats(profiler).stats, file)
if profiler_output == ':response':
response[u'profiler_output'] = {'__!bytes': file.getvalue()}
return serial.dumps(response)
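# Illustrative client-side sketch of a /compute request, pieced together from
# `accepted_mimetypes` and the payload keys checked above. It assumes the
# `json` serialization format is named 'json', that the `requests` library is
# available, and that the server hosts a dataset with an `amount` field; the
# host and port are hypothetical.
def _example_compute_request():  # pragma: no cover - illustrative sketch
    import requests
    t = symbol('t', 'var * {name: string, amount: int64}')
    payload = {u'expr': to_tree(t.amount.sum(), names={t: ':leaf'})}
    return requests.post(
        'http://localhost:%d/compute' % DEFAULT_PORT,
        data=json.dumps(payload),
        headers={'Content-Type': 'application/vnd.blaze+json'})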
@cross_origin(origins='*', methods=['POST', 'HEAD', 'OPTIONS'])
@authorization
@check_request
@_logging
def addserver(payload, serial):
"""Add a data resource to the server.
    The request body should contain a serialized MutableMapping (dictionary-like)
    object, and the server must already be hosting a MutableMapping resource.
"""
data = _get_data.cache[flask.current_app]
if not isinstance(data, collections.MutableMapping):
data_not_mm_msg = ("Cannot update blaze server data since its current "
"data is a %s and not a mutable mapping (dictionary "
"like).")
return (data_not_mm_msg % type(data), RC.UNPROCESSABLE_ENTITY)
if not isinstance(payload, collections.Mapping):
payload_not_mm_msg = ("Need a dictionary-like payload; instead was "
"given %s of type %s.")
return (payload_not_mm_msg % (payload, type(payload)),
RC.UNPROCESSABLE_ENTITY)
if len(payload) > 1:
error_msg = "Given more than one resource to add: %s"
return (error_msg % list(payload.keys()),
RC.UNPROCESSABLE_ENTITY)
[(name, resource_info)] = payload.items()
flask.current_app.logger.debug("Attempting to add dataset '%s'" % name)
if name in data:
msg = "Cannot add dataset named %s, already exists on server."
return (msg % name, RC.CONFLICT)
try:
imports = []
if isinstance(resource_info, dict):
# Extract resource creation arguments
source = resource_info['source']
imports = resource_info.get('imports', [])
args = resource_info.get('args', [])
kwargs = resource_info.get('kwargs', {})
else:
# Just a URI
source, args, kwargs = resource_info, [], {}
# If we've been given libraries to import, we need to do so
# before we can create the resource.
for mod in imports:
importlib.import_module(mod)
# Make a new resource and try to discover it.
new_resource = {name: resource(source, *args, **kwargs)}
# Discovery is a minimal consistency check to determine if the new
# resource is valid.
ds = discover(new_resource)
if name not in ds.dict:
raise ValueError("%s not added." % name)
except NotImplementedError as e:
error_msg = "Addition not supported:\n%s: %s"
return (error_msg % (type(e).__name__, e),
RC.UNPROCESSABLE_ENTITY)
except Exception as e:
error_msg = "Addition failed with message:\n%s: %s"
return (error_msg % (type(e).__name__, e),
RC.UNPROCESSABLE_ENTITY)
else:
# Now that we've established that the new resource is discoverable--and
# thus exists and is accessible--we add the resource to the server.
data.update(new_resource)
return ('OK', RC.CREATED)
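# Illustrative sketch of the payload shapes `addserver` accepts, based on the
# parsing above. The dataset names, URI and module are hypothetical and nothing
# here is executed by the server.
def _example_add_payloads():  # pragma: no cover - illustrative sketch
    simple = {'iris': 'iris.csv'}  # plain URI form
    detailed = {'db_table': {'source': 'sqlite:///data.db::mytable',
                             'imports': ['sqlalchemy'],
                             'args': [],
                             'kwargs': {}}}
    return simple, detailed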
| {
"repo_name": "ContinuumIO/blaze",
"path": "blaze/server/server.py",
"copies": "2",
"size": "26158",
"license": "bsd-3-clause",
"hash": 3728339222485488600,
"line_mean": 33.3280839895,
"line_max": 108,
"alpha_frac": 0.5726737518,
"autogenerated": false,
"ratio": 4.183941138835572,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5756614890635573,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import numpy as np
from glue.config import settings
from qtpy.QtWidgets import QMessageBox
from qtpy.QtCore import QTimer
from glue.core.data import BaseData
from glue.core.link_helpers import LinkSame
from ..common.vispy_data_viewer import BaseVispyViewer
from .layer_artist import VolumeLayerArtist
from .layer_style_widget import VolumeLayerStyleWidget
from .viewer_state import Vispy3DVolumeViewerState
from .layer_state import VolumeLayerState
from .volume_visual import MultiVolume
from ..scatter.layer_artist import ScatterLayerArtist
from ..scatter.layer_style_widget import ScatterLayerStyleWidget
from ..common import tools as _tools, selection_tools # noqa
from . import volume_toolbar # noqa
class VispyVolumeViewer(BaseVispyViewer):
LABEL = "3D Volume Rendering"
_state_cls = Vispy3DVolumeViewerState
_layer_style_widget_cls = {VolumeLayerArtist: VolumeLayerStyleWidget,
ScatterLayerArtist: ScatterLayerStyleWidget}
tools = BaseVispyViewer.tools + ['vispy:lasso', 'vispy:rectangle',
'vispy:circle', 'volume3d:floodfill']
def __init__(self, *args, **kwargs):
super(VispyVolumeViewer, self).__init__(*args, **kwargs)
        # We now make it so that if the user clicks to drag or uses the
        # mouse wheel (or scrolls on a trackpad), we downsample the volume
        # rendering temporarily.
canvas = self._vispy_widget.canvas
canvas.events.mouse_press.connect(self.mouse_press)
canvas.events.mouse_wheel.connect(self.mouse_wheel)
canvas.events.mouse_release.connect(self.mouse_release)
viewbox = self._vispy_widget.view.camera.viewbox
viewbox.events.mouse_wheel.connect(self.camera_mouse_wheel)
viewbox.events.mouse_move.connect(self.camera_mouse_move)
viewbox.events.mouse_press.connect(self.camera_mouse_press)
viewbox.events.mouse_release.connect(self.camera_mouse_release)
self._downsampled = False
# For the mouse wheel, we receive discrete events so we need to have
# a buffer (for now 250ms) before which we consider the mouse wheel
# event to have stopped.
self._downsample_timer = QTimer()
self._downsample_timer.setInterval(250)
self._downsample_timer.setSingleShot(True)
self._downsample_timer.timeout.connect(self.mouse_release)
        # We need to use a MultiVolume instance to store volumes, but we should
        # only have one per canvas. Therefore, we store the MultiVolume
        # instance in the vispy viewer instance.
# Set whether we are emulating a 3D texture. This needs to be
# enabled as a workaround on Windows otherwise VisPy crashes.
emulate_texture = (sys.platform == 'win32' and
sys.version_info[0] < 3)
multivol = MultiVolume(emulate_texture=emulate_texture,
bgcolor=settings.BACKGROUND_COLOR)
self._vispy_widget.add_data_visual(multivol)
self._vispy_widget._multivol = multivol
self.state.add_callback('resolution', self._update_resolution)
self._update_resolution()
        # We do this here in addition to in the volume viewer itself because in
        # some situations, e.g. reloading from session files, a clip_data event
        # isn't emitted.
# FIXME: needs to be done after first layer added
# self._update_clip(force=True)
def mouse_press(self, event=None):
if self.state.downsample:
if hasattr(self._vispy_widget, '_multivol') and not self._downsampled:
self._vispy_widget._multivol.downsample()
self._downsampled = True
def mouse_release(self, event=None):
if self.state.downsample:
if hasattr(self._vispy_widget, '_multivol') and self._downsampled:
self._vispy_widget._multivol.upsample()
self._downsampled = False
self._vispy_widget.canvas.render()
self._update_slice_transform()
self._update_clip()
def _update_clip(self, force=False):
if hasattr(self._vispy_widget, '_multivol'):
if (self.state.clip_data or force):
dx = self.state.x_stretch * self.state.aspect[0]
dy = self.state.y_stretch * self.state.aspect[1]
dz = self.state.z_stretch * self.state.aspect[2]
coords = np.array([[-dx, -dy, -dz], [dx, dy, dz]])
coords = (self._vispy_widget._multivol.transform.imap(coords)[:, :3] /
self._vispy_widget._multivol.resolution)
self._vispy_widget._multivol.set_clip(self.state.clip_data, coords.ravel())
else:
self._vispy_widget._multivol.set_clip(False, [0, 0, 0, 1, 1, 1])
def _update_slice_transform(self):
self._vispy_widget._multivol._update_slice_transform(self.state.x_min, self.state.x_max,
self.state.y_min, self.state.y_max,
self.state.z_min, self.state.z_max)
def _update_resolution(self, *event):
self._vispy_widget._multivol.set_resolution(self.state.resolution)
self._update_slice_transform()
self._update_clip()
def mouse_wheel(self, event=None):
if self.state.downsample:
if hasattr(self._vispy_widget, '_multivol'):
if not self._downsampled:
self.mouse_press()
self._downsample_timer.start()
if event is not None:
event.handled = True
def resizeEvent(self, event=None):
self.mouse_wheel()
super(VispyVolumeViewer, self).resizeEvent(event)
def get_data_layer_artist(self, layer=None, layer_state=None):
if layer.ndim == 1:
cls = ScatterLayerArtist
else:
cls = VolumeLayerArtist
return self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
def get_subset_layer_artist(self, layer=None, layer_state=None):
if layer.ndim == 1:
cls = ScatterLayerArtist
else:
cls = VolumeLayerArtist
return self.get_layer_artist(cls, layer=layer, layer_state=layer_state)
def add_data(self, data):
first_layer_artist = len(self._layer_artist_container) == 0
if data.ndim == 1:
if first_layer_artist:
QMessageBox.critical(self, "Error",
"Can only add a scatter plot overlay once "
"a volume is present".format(data.ndim),
buttons=QMessageBox.Ok)
return False
elif data.ndim == 3:
if not self._has_free_volume_layers:
self._warn_no_free_volume_layers()
return False
else:
QMessageBox.critical(self, "Error",
"Data should be 1- or 3-dimensional ({0} dimensions "
"found)".format(data.ndim),
buttons=QMessageBox.Ok)
return False
added = super(VispyVolumeViewer, self).add_data(data)
if added:
if data.ndim == 1:
self._vispy_widget._update_limits()
if first_layer_artist:
# The above call to add_data may have added subset layers, some
# of which may be incompatible with the data, so we need to now
# explicitly use the layer for the actual data object.
layer = self._layer_artist_container[data][0]
self.state.set_limits(*layer.bbox)
self._ready_draw = True
self._update_slice_transform()
self._show_free_layer_warning = True
return added
def add_subset(self, subset):
if not self._has_free_volume_layers:
self._warn_no_free_volume_layers()
return False
added = super(VispyVolumeViewer, self).add_subset(subset)
if added:
self._show_free_layer_warning = True
return added
@property
def _has_free_volume_layers(self):
return (not hasattr(self._vispy_widget, '_multivol') or
self._vispy_widget._multivol.has_free_slots)
def _warn_no_free_volume_layers(self):
if getattr(self, '_show_free_layer_warning', True):
QMessageBox.critical(self, "Error",
"The volume viewer has reached the maximum number "
"of volume layers. To show more volume layers, remove "
"existing layers and try again. This error will not "
"be shown again unless the limit is reached again in "
"the future.",
buttons=QMessageBox.Ok)
self._show_free_layer_warning = False
def _update_appearance_from_settings(self, message):
super(VispyVolumeViewer, self)._update_appearance_from_settings(message)
if hasattr(self._vispy_widget, '_multivol'):
self._vispy_widget._multivol.set_background(settings.BACKGROUND_COLOR)
def _toggle_clip(self, *args):
if hasattr(self._vispy_widget, '_multivol'):
self._update_clip()
@classmethod
def __setgluestate__(cls, rec, context):
viewer = super(VispyVolumeViewer, cls).__setgluestate__(rec, context)
if rec.get('_protocol', 0) < 2:
# Find all data objects in layers (not subsets)
layer_data = [layer.layer for layer in viewer.state.layers
if (isinstance(layer, VolumeLayerState) and
isinstance(layer.layer, BaseData))]
if len(layer_data) > 1:
reference = layer_data[0]
for data in layer_data[1:]:
if data not in reference.pixel_aligned_data:
break
else:
return viewer
buttons = QMessageBox.Yes | QMessageBox.No
message = ("The 3D volume rendering viewer now requires datasets to "
"be linked in order to be shown at the same time. Are you "
"happy for glue to automatically link your datasets by "
"pixel coordinates?")
answer = QMessageBox.question(None, "Link data?", message,
buttons=buttons,
defaultButton=QMessageBox.Yes)
if answer == QMessageBox.Yes:
for data in layer_data[1:]:
if data not in reference.pixel_aligned_data:
for i in range(3):
link = LinkSame(reference.pixel_component_ids[i],
data.pixel_component_ids[i])
viewer.session.data_collection.add_link(link)
return viewer
def __gluestate__(self, context):
state = super(VispyVolumeViewer, self).__gluestate__(context)
state['_protocol'] = 2
return state
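# Illustrative sketch (separate from the viewer): the single-shot QTimer
# debounce pattern used in VispyVolumeViewer.__init__. Restarting the timer
# from each incoming event means `callback` only fires once events have
# stopped arriving for `interval_ms` milliseconds.
def _example_debounce_timer(callback, interval_ms=250):
    timer = QTimer()
    timer.setInterval(interval_ms)
    timer.setSingleShot(True)
    timer.timeout.connect(callback)
    return timer  # call timer.start() from each event handler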
| {
"repo_name": "astrofrog/glue-3d-viewer",
"path": "glue_vispy_viewers/volume/volume_viewer.py",
"copies": "2",
"size": "11455",
"license": "bsd-2-clause",
"hash": 3214101672879490000,
"line_mean": 39.3345070423,
"line_max": 96,
"alpha_frac": 0.5828022698,
"autogenerated": false,
"ratio": 4.206757253029747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5789559522829747,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import os
import logging
from functools import wraps
from pkgutil import get_loader
from collections import OrderedDict
import datetime
import pymongo.errors
import tornado.ioloop
import tornado.web
from tornado.escape import json_encode,json_decode
from tornado.gen import coroutine
from file_catalog.validation import Validation
import file_catalog
from file_catalog.mongo import Mongo
from file_catalog import urlargparse
logger = logging.getLogger('server')
def get_pkgdata_filename(package, resource):
"""Get a filename for a resource bundled within the package"""
loader = get_loader(package)
if loader is None or not hasattr(loader, 'get_data'):
return None
mod = sys.modules.get(package) or loader.load_module(package)
if mod is None or not hasattr(mod, '__file__'):
return None
# Modify the resource name to be compatible with the loader.get_data
# signature - an os.path format "filename" starting with the dirname of
# the package's __file__
parts = resource.split('/')
parts.insert(0, os.path.dirname(mod.__file__))
return os.path.join(*parts)
def tornado_logger(handler):
"""Log levels based on status code"""
if handler.get_status() < 400:
log_method = logger.debug
elif handler.get_status() < 500:
log_method = logger.warning
else:
log_method = logger.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
def sort_dict(d):
"""
    Creates an OrderedDict from the `dict` named `d` with its keys sorted.
    If a value is itself a `dict`, this function is applied to it recursively.
"""
od = OrderedDict(sorted(d.items()))
# check for dicts in values
    for key, value in od.items():
if isinstance(value, dict):
od[key] = sort_dict(value)
return od
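# Minimal usage sketch: keys come back sorted at every nesting level.
def _sort_dict_example():
    od = sort_dict({'b': 1, 'a': {'d': 2, 'c': 3}})
    assert list(od.keys()) == ['a', 'b']
    assert list(od['a'].keys()) == ['c', 'd']
    return od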
def set_last_modification_date(d):
d['meta_modify_date'] = str(datetime.datetime.utcnow())
class Server(object):
"""A file_catalog server instance"""
def __init__(self, config, port=8888, db_host='localhost', debug=False):
static_path = get_pkgdata_filename('file_catalog', 'data/www')
if static_path is None:
raise Exception('bad static path')
template_path = get_pkgdata_filename('file_catalog', 'data/www_templates')
if template_path is None:
raise Exception('bad template path')
# print configuration
logger.info('db host: %s' % db_host)
logger.info('server port: %s' % port)
logger.info('debug: %s' % debug)
main_args = {
'base_url': '/api',
'debug': debug,
}
api_args = main_args.copy()
api_args.update({
'db': Mongo(db_host),
'config': config,
})
app = tornado.web.Application([
(r"/", MainHandler, main_args),
(r"/api", HATEOASHandler, api_args),
(r"/api/files", FilesHandler, api_args),
(r"/api/files/(.*)", SingleFileHandler, api_args),
],
static_path=static_path,
template_path=template_path,
log_function=tornado_logger,
)
app.listen(port)
def run(self):
tornado.ioloop.IOLoop.current().start()
class MainHandler(tornado.web.RequestHandler):
"""Main HTML handler"""
def initialize(self, base_url='/', debug=False):
self.base_url = base_url
self.debug = debug
def get_template_namespace(self):
namespace = super(MainHandler,self).get_template_namespace()
namespace['version'] = file_catalog.__version__
return namespace
def get(self):
try:
self.render('index.html')
except Exception as e:
logger.warn('Error in main handler', exc_info=True)
message = 'Error generating page.'
if self.debug:
message += '\n' + str(e)
self.send_error(message=message)
def write_error(self,status_code=500,**kwargs):
"""Write out custom error page."""
self.set_status(status_code)
if status_code >= 500:
self.write('<h2>Internal Error</h2>')
else:
self.write('<h2>Request Error</h2>')
if 'message' in kwargs:
self.write('<br />'.join(kwargs['message'].split('\n')))
self.finish()
def catch_error(method):
"""Decorator to catch and handle errors on api handlers"""
@wraps(method)
def wrapper(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except Exception as e:
logger.warn('Error in api handler', exc_info=True)
kwargs = {'message':'Internal error in '+self.__class__.__name__}
if self.debug:
kwargs['exception'] = str(e)
self.send_error(**kwargs)
return wrapper
class APIHandler(tornado.web.RequestHandler):
"""Base class for API handlers"""
def initialize(self, config, db=None, base_url='/', debug=False, rate_limit=10):
self.db = db
self.base_url = base_url
self.debug = debug
self.config = config
# subtract 1 to test before current connection is added
self.rate_limit = rate_limit-1
self.rate_limit_data = {}
def set_default_headers(self):
self.set_header('Content-Type', 'application/hal+json; charset=UTF-8')
def prepare(self):
# implement rate limiting
ip = self.request.remote_ip
if ip in self.rate_limit_data:
if self.rate_limit_data[ip] > self.rate_limit:
                self.send_error(429, message='rate limit exceeded for IP address')
else:
self.rate_limit_data[ip] += 1
else:
self.rate_limit_data[ip] = 1
def on_finish(self):
ip = self.request.remote_ip
self.rate_limit_data[ip] -= 1
if self.rate_limit_data[ip] <= 0:
del self.rate_limit_data[ip]
def write(self, chunk):
# override write so we don't output a json header
if isinstance(chunk, dict):
chunk = json_encode(sort_dict(chunk))
super(APIHandler, self).write(chunk)
def write_error(self,status_code=500,**kwargs):
"""Write out custom error page."""
self.set_status(status_code)
kwargs.pop('exc_info',None)
if kwargs:
self.write(kwargs)
self.finish()
class HATEOASHandler(APIHandler):
def initialize(self, **kwargs):
super(HATEOASHandler, self).initialize(**kwargs)
# response is known ahead of time, so pre-compute it
self.data = {
'_links':{
'self': {'href': self.base_url},
},
'files': {'href': os.path.join(self.base_url,'files')},
}
@catch_error
def get(self):
self.write(self.data)
class FilesHandler(APIHandler):
def initialize(self, **kwargs):
super(FilesHandler, self).initialize(**kwargs)
self.files_url = os.path.join(self.base_url,'files')
self.validation = Validation(self.config)
@catch_error
@coroutine
def get(self):
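        # Illustrative request shapes handled below (values are hypothetical):
        #   GET /api/files?limit=20&start=40
        #   GET /api/files?query=<URL-encoded JSON, e.g. {"uid": "some-uid"}>
        # `limit` is clamped to config['filelist']['max_files'].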
try:
kwargs = urlargparse.parse(self.request.query)
if 'limit' in kwargs:
kwargs['limit'] = int(kwargs['limit'])
if kwargs['limit'] < 1:
raise Exception('limit is not positive')
# check with config
if kwargs['limit'] > self.config['filelist']['max_files']:
kwargs['limit'] = self.config['filelist']['max_files']
else:
# if no limit has been defined, set max limit
kwargs['limit'] = self.config['filelist']['max_files']
if 'start' in kwargs:
kwargs['start'] = int(kwargs['start'])
if kwargs['start'] < 0:
raise Exception('start is negative')
if 'query' in kwargs:
kwargs['query'] = json_decode(kwargs['query'])
                # _id and mongo_id mean the same thing (mongo_id is renamed to _id in self.db.find_files())
                # make sure the query does not contain both keys
if '_id' in kwargs['query'] and 'mongo_id' in kwargs['query']:
logging.warn('`query` contains `_id` and `mongo_id`', exc_info=True)
self.send_error(400, message='`query` contains `_id` and `mongo_id`')
return
except:
logging.warn('query parameter error', exc_info=True)
self.send_error(400, message='invalid query parameters')
return
files = yield self.db.find_files(**kwargs)
self.write({
'_links':{
'self': {'href': self.files_url},
'parent': {'href': self.base_url},
},
'_embedded':{
'files': files,
},
'files': [os.path.join(self.files_url,f['mongo_id']) for f in files],
})
@catch_error
@coroutine
def post(self):
metadata = json_decode(self.request.body)
if not self.validation.validate_metadata_creation(self, metadata):
return
set_last_modification_date(metadata)
ret = yield self.db.get_file({'uid':metadata['uid']})
if ret:
# file uid already exists, check checksum
if ret['checksum'] != metadata['checksum']:
                # the uid already exists but is not a replica (the checksum is different)
self.send_error(409, message='conflict with existing file (uid already exists)',
file=os.path.join(self.files_url,ret['mongo_id']))
return
elif any(f in ret['locations'] for f in metadata['locations']):
# replica has already been added
self.send_error(409, message='replica has already been added',
file=os.path.join(self.files_url,ret['mongo_id']))
return
else:
# add replica
ret['locations'].extend(metadata['locations'])
yield self.db.update_file(ret)
self.set_status(200)
ret = ret['mongo_id']
else:
ret = yield self.db.create_file(metadata)
self.set_status(201)
self.write({
'_links':{
'self': {'href': self.files_url},
'parent': {'href': self.base_url},
},
'file': os.path.join(self.files_url, ret),
})
class SingleFileHandler(APIHandler):
def initialize(self, **kwargs):
super(SingleFileHandler, self).initialize(**kwargs)
self.files_url = os.path.join(self.base_url,'files')
self.validation = Validation(self.config)
@catch_error
@coroutine
def get(self, mongo_id):
try:
ret = yield self.db.get_file({'mongo_id':mongo_id})
if ret:
ret['_links'] = {
'self': {'href': os.path.join(self.files_url,mongo_id)},
'parent': {'href': self.files_url},
}
self.write(ret)
else:
self.send_error(404, message='not found')
except pymongo.errors.InvalidId:
self.send_error(400, message='Not a valid mongo_id')
@catch_error
@coroutine
def delete(self, mongo_id):
try:
yield self.db.delete_file({'mongo_id':mongo_id})
except pymongo.errors.InvalidId:
self.send_error(400, message='Not a valid mongo_id')
except:
self.send_error(404, message='not found')
else:
self.set_status(204)
@catch_error
@coroutine
def patch(self, mongo_id):
metadata = json_decode(self.request.body)
if self.validation.has_forbidden_attributes_modification(self, metadata):
return
set_last_modification_date(metadata)
links = {
'self': {'href': os.path.join(self.files_url,mongo_id)},
'parent': {'href': self.files_url},
}
try:
ret = yield self.db.get_file({'mongo_id':mongo_id})
except pymongo.errors.InvalidId:
self.send_error(400, message='Not a valid mongo_id')
return
if ret:
# check if this is the same version we're trying to patch
test_write = ret.copy()
test_write['_links'] = links
self.write(test_write)
self.set_etag_header()
same = self.check_etag_header()
self._write_buffer = []
if same:
ret.update(metadata)
if not self.validation.validate_metadata_modification(self, ret):
return
yield self.db.update_file(ret.copy())
ret['_links'] = links
self.write(ret)
self.set_etag_header()
else:
self.send_error(409, message='conflict (version mismatch)',
_links=links)
else:
self.send_error(404, message='not found')
@catch_error
@coroutine
def put(self, mongo_id):
metadata = json_decode(self.request.body)
# check if user wants to set forbidden fields
# `uid` is not allowed to be changed
if self.validation.has_forbidden_attributes_modification(self, metadata):
return
set_last_modification_date(metadata)
if 'mongo_id' not in metadata:
metadata['mongo_id'] = mongo_id
links = {
'self': {'href': os.path.join(self.files_url,mongo_id)},
'parent': {'href': self.files_url},
}
try:
ret = yield self.db.get_file({'mongo_id':mongo_id})
except pymongo.errors.InvalidId:
self.send_error(400, message='Not a valid mongo_id')
return
        if ret:
            # keep `uid` from the stored record; it is not allowed to change
            metadata['uid'] = str(ret['uid'])
            # check if this is the same version we're trying to replace
test_write = ret.copy()
test_write['_links'] = links
self.write(test_write)
self.set_etag_header()
same = self.check_etag_header()
self._write_buffer = []
if same:
if not self.validation.validate_metadata_modification(self, metadata):
return
yield self.db.replace_file(metadata.copy())
metadata['_links'] = links
self.write(metadata)
self.set_etag_header()
else:
self.send_error(409, message='conflict (version mismatch)',
_links=links)
else:
self.send_error(404, message='not found')
| {
"repo_name": "JadonKrys/file_catalog",
"path": "file_catalog/server.py",
"copies": "1",
"size": "15207",
"license": "mit",
"hash": -2168862450571826400,
"line_mean": 32.6438053097,
"line_max": 107,
"alpha_frac": 0.5527717499,
"autogenerated": false,
"ratio": 4.1144480519480515,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0060145178110627175,
"num_lines": 452
} |
from __future__ import absolute_import, division, print_function
import sys
import os
import pandas as pd
import gzip
import datashape
from datashape import Option, string
from collections import Iterator
from into.backends.csv import (CSV, append, convert, resource,
csv_to_DataFrame, CSV_to_chunks_of_dataframes)
from into.utils import tmpfile, filetext, filetexts, raises
from into import into, append, convert, resource, discover, dshape
from into.compatibility import unicode, skipif
def test_csv():
with tmpfile('.csv') as fn:
csv = CSV(fn, dshape='var * {name: string, amount: int}', delimiter=',')
assert csv.dialect['delimiter'] == ','
def test_csv_append():
with tmpfile('.csv') as fn:
csv = CSV(fn, has_header=False)
data = [('Alice', 100), ('Bob', 200)]
append(csv, data)
assert list(convert(Iterator, csv)) == data
with open(fn) as f:
s = f.read()
assert 'Alice' in s
assert '100' in s
def test_pandas_read():
with filetext('Alice,1\nBob,2') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
csv = CSV(fn)
df = csv_to_DataFrame(csv, dshape=ds)
assert isinstance(df, pd.DataFrame)
assert convert(list, df) == [('Alice', 1), ('Bob', 2)]
assert list(df.columns) == ['name', 'amount']
def test_pandas_read_supports_datetimes():
with filetext('Alice,2014-01-02\nBob,2014-01-03') as fn:
ds = datashape.dshape('var * {name: string, when: date}')
csv = CSV(fn)
df = csv_to_DataFrame(csv, dshape=ds)
assert isinstance(df, pd.DataFrame)
assert list(df.columns) == ['name', 'when']
assert df.dtypes['when'] == 'M8[ns]'
def test_pandas_read_supports_missing_integers():
with filetext('Alice,1\nBob,') as fn:
ds = datashape.dshape('var * {name: string, val: ?int32}')
csv = CSV(fn)
df = csv_to_DataFrame(csv, dshape=ds)
assert isinstance(df, pd.DataFrame)
assert list(df.columns) == ['name', 'val']
assert df.dtypes['val'] == 'f4'
def test_pandas_read_supports_gzip():
with filetext('Alice,1\nBob,2', open=gzip.open, extension='.csv.gz') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
csv = CSV(fn)
df = csv_to_DataFrame(csv, dshape=ds)
assert isinstance(df, pd.DataFrame)
assert convert(list, df) == [('Alice', 1), ('Bob', 2)]
assert list(df.columns) == ['name', 'amount']
def test_pandas_read_supports_read_csv_kwargs():
with filetext('Alice,1\nBob,2') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
csv = CSV(fn)
df = csv_to_DataFrame(csv, dshape=ds, usecols=['name'])
assert isinstance(df, pd.DataFrame)
assert convert(list, df) == [('Alice',), ('Bob',)]
def test_pandas_write():
with tmpfile('.csv') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
data = [('Alice', 1), ('Bob', 2)]
csv = CSV(fn, has_header=True)
append(csv, data, dshape=ds)
with open(fn) as f:
assert 'name' in f.read()
# Doesn't write header twice
append(csv, data, dshape=ds)
with open(fn) as f:
s = f.read()
assert s.count('name') == 1
def test_pandas_writes_header_by_default():
with tmpfile('.csv') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
data = [('Alice', 1), ('Bob', 2)]
csv = CSV(fn)
append(csv, data, dshape=ds)
with open(fn) as f:
assert 'name' in f.read()
@skipif(sys.version_info[0] == 3)
def test_pandas_write_gzip():
with tmpfile('.csv.gz') as fn:
ds = datashape.dshape('var * {name: string, amount: int}')
data = [('Alice', 1), ('Bob', 2)]
csv = CSV(fn, has_header=True)
append(csv, data, dshape=ds)
f = gzip.open(fn)
s = f.read()
assert 'name' in s
assert 'Alice,1' in s
f.close()
def test_pandas_loads_in_datetimes_naively():
with filetext('name,when\nAlice,2014-01-01\nBob,2014-02-02') as fn:
csv = CSV(fn, has_header=True)
ds = datashape.dshape('var * {name: string, when: datetime}')
assert discover(csv) == ds
df = convert(pd.DataFrame, csv)
assert df.dtypes['when'] == 'M8[ns]'
def test_pandas_discover_on_gzipped_files():
with filetext('name,when\nAlice,2014-01-01\nBob,2014-02-02',
open=gzip.open, extension='.csv.gz') as fn:
csv = CSV(fn, has_header=True)
ds = datashape.dshape('var * {name: string, when: datetime}')
assert discover(csv) == ds
def test_csv_into_list():
with filetext('name,val\nAlice,100\nBob,200', extension='csv') as fn:
L = into(list, fn)
assert L == [('Alice', 100), ('Bob', 200)]
def test_discover_csv_files_without_header():
with filetext('Alice,2014-01-01\nBob,2014-02-02') as fn:
csv = CSV(fn, has_header=False)
df = convert(pd.DataFrame, csv)
assert len(df) == 2
assert 'Alice' not in list(df.columns)
def test_discover_csv_yields_string_on_totally_empty_columns():
expected = dshape('var * {a: int64, b: ?string, c: int64}')
with filetext('a,b,c\n1,,3\n4,,6\n7,,9') as fn:
csv = CSV(fn, has_header=True)
assert discover(csv) == expected
def test_glob():
d = {'accounts1.csv': 'name,when\nAlice,100\nBob,200',
'accounts2.csv': 'name,when\nAlice,300\nBob,400'}
with filetexts(d) as fns:
r = resource('accounts*.csv', has_header=True)
assert convert(list, r) == [('Alice', 100), ('Bob', 200),
('Alice', 300), ('Bob', 400)]
def test_pandas_csv_naive_behavior_results_in_columns():
df = pd.DataFrame([[1, 'Alice', 100],
[2, 'Bob', -200],
[3, 'Charlie', 300],
[4, 'Denis', 400],
[5, 'Edith', -500]], columns=['id', 'name', 'amount'])
with tmpfile('.csv') as fn:
into(fn, df)
with open(fn) as f:
assert next(f).strip() == 'id,name,amount'
def test_discover_csv_without_columns():
with filetext('Alice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
ds = discover(csv)
assert '100' not in str(ds)
def test_header_argument_set_with_or_without_header():
with filetext('name,val\nAlice,100\nBob,200', extension='csv') as fn:
assert into(list, fn) == [('Alice', 100), ('Bob', 200)]
with filetext('Alice,100\nBob,200', extension='csv') as fn:
assert into(list, fn) == [('Alice', 100), ('Bob', 200)]
def test_first_csv_establishes_consistent_dshape():
d = {'accounts1.csv': 'name,when\nAlice,one\nBob,two',
'accounts2.csv': 'name,when\nAlice,300\nBob,400'}
with filetexts(d) as fns:
L = into(list, 'accounts*.csv')
assert len(L) == 4
assert all(isinstance(val, (str, unicode)) for name, val in L)
def test_discover_csv_with_spaces_in_header():
with filetext(' name, val\nAlice,100\nBob,200', extension='csv') as fn:
ds = discover(CSV(fn, has_header=True))
assert ds.measure.names == ['name', 'val']
def test_header_disagrees_with_dshape():
ds = datashape.dshape('var * {name: string, bal: int64}')
with filetext('name,val\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn, header=True)
assert convert(list, csv) == [('Alice', 100), ('Bob', 200)]
assert list(convert(pd.DataFrame, csv).columns) == ['name', 'val']
assert list(convert(pd.DataFrame, csv, dshape=ds).columns) == ['name', 'bal']
def test_raise_errors_quickly_on_into_chunks_dataframe():
with filetext('name,val\nAlice,100\nBob,foo', extension='csv') as fn:
ds = datashape.dshape('var * {name: string, val: int}')
csv = CSV(fn, header=True)
assert raises(Exception,
lambda: CSV_to_chunks_of_dataframes(csv, dshape=ds))
def test_unused_datetime_columns():
ds = datashape.dshape('var * {val: string, when: datetime}')
with filetext("val,when\na,2000-01-01\nb,2000-02-02") as fn:
csv = CSV(fn, has_header=True)
assert convert(list, csv_to_DataFrame(csv, usecols=['val'],
squeeze=True, dshape=ds)) == ['a', 'b']
def test_empty_dataframe():
with filetext('name,val', extension='csv') as fn:
csv = CSV(fn, has_header=True)
df = convert(pd.DataFrame, csv)
assert isinstance(df, pd.DataFrame)
def test_csv_missing_values():
with filetext('name,val\nAlice,100\nNA,200', extension='csv') as fn:
csv = CSV(fn)
assert discover(csv).measure.dict['name'] == Option(string)
def test_csv_separator_header():
with filetext('a|b|c\n1|2|3\n4|5|6', extension='csv') as fn:
csv = CSV(fn, delimiter='|', has_header=True)
assert convert(list, csv) == [(1, 2, 3), (4, 5, 6)]
| {
"repo_name": "mrocklin/into",
"path": "into/backends/tests/test_csv.py",
"copies": "1",
"size": "9071",
"license": "bsd-3-clause",
"hash": 945541076402372200,
"line_mean": 33.2301886792,
"line_max": 85,
"alpha_frac": 0.5828464337,
"autogenerated": false,
"ratio": 3.199647266313933,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4282493700013933,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import os
import flask
from flask import request, Response
import blaze
import datashape
from dynd import nd, ndt
from blaze.catalog.array_provider import json_array_provider
from blaze.catalog.blaze_url import (split_array_base, add_indexers_to_url,
slice_as_string, index_tuple_as_string)
from blaze.py2help import _inttypes, _strtypes
from .datashape_html import render_datashape
from .compute_session import compute_session
from .crossdomain import crossdomain
app = flask.Flask('blaze.server')
app.sessions = {}
def indexers_navigation_html(base_url, array_name, indexers):
base_url = base_url + array_name
result = '<a href="' + base_url + '">' + array_name + '</a>'
for i, idx in enumerate(indexers):
if isinstance(idx, _strtypes):
base_url = base_url + '.' + idx
result += (' . <a href="' + base_url + '">' + idx + '</a>')
elif isinstance(idx, _inttypes):
new_base_url = base_url + '[' + str(idx) + ']'
result += (' <a href="' + new_base_url + '">[' + str(idx) + ']</a>')
# Links to increment/decrement this indexer
#result += '<font style="size:7px"><table cellpadding="0" cellspacing="0" border="0">'
#result += '<tr><td><a href="'
#result += add_indexers_to_url(base_url, [idx + 1] + indexers[i+1:])
#result += '">/\\</a></td></tr>'
#result += '<tr><td><a href="'
#result += add_indexers_to_url(base_url, [idx - 1] + indexers[i+1:])
#result += '">\\/</a></td></tr>'
#result += '</table></font>'
base_url = new_base_url
elif isinstance(idx, slice):
s = slice_as_string(idx)
base_url = base_url + s
result += (' <a href="' + base_url + '">' + s + '</a>')
elif isinstance(idx, tuple):
s = index_tuple_as_string(idx)
base_url = base_url + s
result += (' <a href="' + base_url + '">' + s + '</a>')
else:
raise IndexError('Invalid indexer %r' % idx)
return result
def get_array(array_name, indexers):
arr = blaze.catalog.get(array_name)
for i in indexers:
if type(i) in [slice, int, tuple]:
arr = arr[i]
else:
ds = arr.dshape
if isinstance(ds, datashape.DataShape):
ds = ds[-1]
if isinstance(ds, datashape.Record) and i in ds.names:
arr = getattr(arr, i)
else:
raise Exception('Blaze array does not have field ' + i)
return arr
def html_array(arr, base_url, array_name, indexers):
array_url = add_indexers_to_url(base_url + array_name, indexers)
print(array_url)
nav_html = indexers_navigation_html(base_url, array_name, indexers)
datashape_html = render_datashape(array_url, arr.dshape)
body = '<html><head><title>Blaze Array</title></head>\n' + \
'<body>\n' + \
'Blaze Array > ' + nav_html + '\n<p />\n' + \
'<a href="' + array_url + '?r=data.json">JSON</a>\n<p />\n' + \
datashape_html + \
'\n</body></html>'
return body
@app.route("/favicon.ico")
def favicon():
return 'no icon'
@app.route("/<path:path>", methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(origin="*", automatic_options=False, automatic_headers=True)
def handle(path):
if request.path in app.sessions:
return handle_session_query()
else:
return handle_array_query()
def handle_session_query():
session = app.sessions[request.path]
q_req = request.values['r']
if q_req == 'close_session':
content_type, body = session.close()
return Response(body, mimetype='application/json')
elif q_req == 'add_computed_fields':
j = request.values['json']
content_type, body = session.add_computed_fields(j)
return Response(body, mimetype='application/json')
elif q_req == 'sort':
j = request.values['json']
content_type, body = session.sort(j)
return Response(body, mimetype='application/json')
elif q_req == 'groupby':
j = request.values['json']
content_type, body = session.groupby(j)
return Response(body, mimetype='application/json')
else:
return 'something with session ' + session.session_name
def handle_array_query():
array_name, indexers = split_array_base(request.path.rstrip('/'))
arr = get_array(array_name, indexers)
base_url = request.url_root[:-1]
#no query params
# NOTE: len(request.values) was failing within werkzeug
if len(list(request.values)) == 0:
return html_array(arr, base_url, array_name, indexers)
q_req = request.values['r']
if q_req == 'data.json':
dat = arr._data.dynd_arr()
return Response(nd.as_py(nd.format_json(dat).view_scalars(ndt.bytes)),
mimetype='application/json')
elif q_req == 'datashape':
        # Return the datashape as plain text with an explicit content type.
        return Response(str(arr.dshape),
                        content_type='text/plain; charset=utf-8')
elif q_req == 'dyndtype':
content_type = 'application/json; charset=utf-8'
body = str(arr.dtype)
return Response(body, mimetype='application/json')
elif q_req == 'dynddebug':
return arr.debug_repr()
elif q_req == 'create_session':
session = compute_session(base_url,
add_indexers_to_url(array_name, indexers))
app.sessions[session.session_name] = session
content_type, body = session.creation_response()
return Response(body, mimetype='application/json')
else:
        flask.abort(400, "Unknown Blaze server request %s" % q_req)
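# Illustrative URL shapes the handlers above understand (array and field names
# are hypothetical): the path selects an array plus optional indexers, and the
# `r` query parameter selects the representation.
#   /accounts                        -> HTML navigation page
#   /accounts.amount[0]?r=data.json  -> JSON data for that element
#   /accounts?r=datashape            -> plain-text datashape
#   /accounts?r=create_session       -> start a compute session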
if __name__ == "__main__":
if len(sys.argv) > 1:
root_path = sys.argv[1]
else:
root_path = os.path.join(os.getcwdu(), 'arrays')
array_provider = json_array_provider(root_path)
app.array_provider = array_provider
app.run(debug=True, port=8080, use_reloader=True)
| {
"repo_name": "XinSong/blaze",
"path": "blaze/io/server/app.py",
"copies": "10",
"size": "6092",
"license": "bsd-3-clause",
"hash": 7882122456148219000,
"line_mean": 35.9212121212,
"line_max": 98,
"alpha_frac": 0.5809258043,
"autogenerated": false,
"ratio": 3.4340473506200677,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014842168432524633,
"num_lines": 165
} |
from __future__ import absolute_import, division, print_function
import sys
import re
from contextlib import contextmanager
import datashape
from datashape import discover, Record, Option
from datashape.predicates import isrecord
from datashape.dispatch import dispatch
from toolz import concat, keyfilter, keymap, merge, valfilter
import pandas
import pandas as pd
import os
import gzip
import bz2
import uuid
import csv
from glob import glob
from ..compatibility import unicode, PY2
from ..utils import keywords, ext
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..chunks import chunks
from ..temp import Temp
from ..numpy_dtype import dshape_to_pandas
from .pandas import coerce_datetimes
dialect_terms = '''delimiter doublequote escapechar lineterminator quotechar
quoting skipinitialspace strict'''.split()
aliases = {'sep': 'delimiter'}
def alias(key):
""" Alias kwarg dialect keys to normalized set
>>> alias('sep')
'delimiter'
"""
key = key.lower()
return aliases.get(key, key)
@contextmanager
def open_file(path, *args, **kwargs):
f = compressed_open.get(ext(path), open)(path, *args, **kwargs)
try:
yield f
finally:
f.close()
def infer_header(path, nbytes=10000, encoding='utf-8', **kwargs):
if encoding is None:
encoding = 'utf-8'
with open_file(path, 'rb') as f:
raw = f.read(nbytes)
return csv.Sniffer().has_header(raw if PY2 else raw.decode(encoding))
def sniff_dialect(path, nbytes, encoding='utf-8'):
if not os.path.exists(path):
return {}
if encoding is None:
encoding = 'utf-8'
with open_file(path, 'rb') as f:
raw = f.read(nbytes)
dialect = csv.Sniffer().sniff(raw.decode(encoding))
dialect.lineterminator = (b'\r\n'
if b'\r\n' in raw else b'\n').decode(encoding)
return dialect_to_dict(dialect)
def dialect_to_dict(dialect):
return dict((name, getattr(dialect, name))
for name in dialect_terms if hasattr(dialect, name))
class CSV(object):
""" Proxy for a CSV file
Parameters
----------
path : str
Path to file on disk
has_header : bool
If the csv file has a header or not
encoding : str (default utf-8)
File encoding
kwargs : other...
Various choices about dialect
"""
canonical_extension = 'csv'
def __init__(self, path, has_header=None, encoding='utf-8',
sniff_nbytes=10000, **kwargs):
self.path = path
if has_header is None:
self.has_header = (not os.path.exists(path) or
infer_header(path, sniff_nbytes))
else:
self.has_header = has_header
self.encoding = encoding if encoding is not None else 'utf-8'
kwargs = merge(sniff_dialect(path, sniff_nbytes, encoding=encoding),
keymap(alias, kwargs))
self.dialect = valfilter(bool,
dict((d, kwargs[d])
for d in dialect_terms if d in kwargs))
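# Minimal usage sketch of the proxy above: constructing a CSV object records
# the dialect but reads nothing until a conversion is requested. The filename
# is hypothetical.
def _csv_proxy_example():  # pragma: no cover - illustrative sketch
    c = CSV('accounts.csv', has_header=True, delimiter='|')
    assert c.dialect['delimiter'] == '|'
    return c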
@append.register(CSV, object)
def append_object_to_csv(c, seq, **kwargs):
append(c, convert(chunks(pd.DataFrame), seq, **kwargs), **kwargs)
return c
compressed_open = {'gz': gzip.open, 'bz2': bz2.BZ2File}
@append.register(CSV, pd.DataFrame)
def append_dataframe_to_csv(c, df, dshape=None, **kwargs):
if not os.path.exists(c.path) or not os.path.getsize(c.path):
has_header = kwargs.pop('has_header', c.has_header)
else:
has_header = False
sep = kwargs.get('sep',
kwargs.get('delimiter', c.dialect.get('delimiter', ',')))
encoding = kwargs.get('encoding', c.encoding)
if ext(c.path) in compressed_open:
if sys.version_info[0] >= 3:
kwargs['mode'] = 'at'
kwargs['encoding'] = encoding
elif sys.version_info[0] == 2:
kwargs['mode'] = 'ab' if sys.platform == 'win32' else 'at'
f = compressed_open[ext(c.path)](c.path, **kwargs)
else:
f = c.path
try:
df.to_csv(f, mode='a', header=has_header, index=False, sep=sep,
encoding=encoding)
finally:
if hasattr(f, 'close'):
f.close()
return c
@append.register(CSV, chunks(pd.DataFrame))
def append_iterator_to_csv(c, cs, **kwargs):
for chunk in cs:
append(c, chunk, **kwargs)
return c
@convert.register(pd.DataFrame, (Temp(CSV), CSV), cost=20.0)
def csv_to_dataframe(c, dshape=None, chunksize=None, nrows=None, **kwargs):
try:
return _csv_to_dataframe(c, dshape=dshape, chunksize=chunksize,
nrows=nrows, **kwargs)
except StopIteration:
if nrows:
return _csv_to_dataframe(c, dshape=dshape, chunksize=chunksize,
**kwargs)
else:
raise
def _csv_to_dataframe(c, dshape=None, chunksize=None, **kwargs):
header = {False: None, True: 0}.get(
kwargs.pop('has_header', c.has_header), 'infer')
sep = kwargs.pop(
'sep', kwargs.pop('delimiter', c.dialect.get('delimiter', ',')))
encoding = kwargs.get('encoding', c.encoding)
if dshape:
dtypes, parse_dates = dshape_to_pandas(dshape)
if isrecord(dshape.measure):
names = kwargs.get('names', dshape.measure.names)
else:
names = kwargs.get('names')
else:
dtypes = parse_dates = names = None
usecols = kwargs.pop('usecols', None)
if parse_dates and usecols:
parse_dates = [col for col in parse_dates if col in usecols]
compression = kwargs.pop('compression',
{'gz': 'gzip', 'bz2': 'bz2'}.get(ext(c.path)))
# See read_csv docs for header for reasoning
if names:
try:
found_names = pd.read_csv(c.path, encoding=encoding,
compression=compression, nrows=1)
except StopIteration:
found_names = pd.read_csv(c.path, encoding=encoding,
compression=compression)
if names and header == 'infer':
if [n.strip() for n in found_names] == [n.strip() for n in names]:
header = 0
elif (all(re.match('^\s*\D\w*\s*$', n) for n in found_names) and
not all(dt == datashape.string for dt in dshape.measure.types)):
header = 0
else:
header = None
kwargs = keyfilter(keywords(pandas.read_csv).__contains__, kwargs)
return pandas.read_csv(c.path,
header=header,
sep=sep,
encoding=encoding,
dtype=dtypes,
parse_dates=parse_dates,
names=names,
compression=compression,
chunksize=chunksize,
usecols=usecols,
**kwargs)
@convert.register(chunks(pd.DataFrame), (Temp(CSV), CSV), cost=10.0)
def CSV_to_chunks_of_dataframes(c, chunksize=2 ** 20, **kwargs):
# Load a small 1000 line DF to start
# This helps with rapid viewing of a large CSV file
first = csv_to_dataframe(c, nrows=1000, **kwargs)
if len(first) == 1000:
rest = csv_to_dataframe(
c, chunksize=chunksize, skiprows=1000, **kwargs)
else:
rest = []
def _():
yield first
for df in rest:
yield df
return chunks(pd.DataFrame)(_)
@discover.register(CSV)
def discover_csv(c, nrows=1000, **kwargs):
df = csv_to_dataframe(c, nrows=nrows, **kwargs)
df = coerce_datetimes(df)
if (list(df.columns) != list(range(len(df.columns))) and
any(re.match(r'^[-\d_]*$', c) is not None for c in df.columns)):
df = csv_to_dataframe(c, chunksize=50, has_header=False).get_chunk()
df = coerce_datetimes(df)
columns = [str(c) if not isinstance(c, (str, unicode)) else c
for c in df.columns]
df.columns = [c.strip() for c in columns]
# Replace np.nan with None. Forces type string rather than float
for col in df.columns:
if not df[col].count():
df[col] = None
measure = discover(df).measure
# Use Series.notnull to determine Option-ness
measure = Record([[name, Option(typ)
if df[name].isnull().any() and
not isinstance(typ, Option) else typ]
for name, typ in zip(measure.names, measure.types)])
return datashape.var * measure
@resource.register('.+\.(csv|tsv|ssv|data|dat)(\.gz|\.bz2?)?')
def resource_csv(uri, **kwargs):
return CSV(uri, **kwargs)
@resource.register('.*\*.+', priority=12)
def resource_glob(uri, **kwargs):
filenames = sorted(glob(uri))
r = resource(filenames[0], **kwargs)
return chunks(type(r))([resource(u, **kwargs) for u in sorted(glob(uri))])
@convert.register(chunks(pd.DataFrame), (chunks(CSV), chunks(Temp(CSV))),
cost=10.0)
def convert_glob_of_csvs_to_chunks_of_dataframes(csvs, **kwargs):
def _():
return concat(convert(chunks(pd.DataFrame), csv, **kwargs)
for csv in csvs)
return chunks(pd.DataFrame)(_)
@convert.register(Temp(CSV), (pd.DataFrame, chunks(pd.DataFrame)))
def convert_dataframes_to_temporary_csv(data, **kwargs):
fn = '.%s.csv' % uuid.uuid1()
csv = Temp(CSV)(fn, **kwargs)
return append(csv, data, **kwargs)
@dispatch(CSV)
def drop(c):
os.unlink(c.path)
ooc_types.add(CSV)
| {
"repo_name": "ywang007/odo",
"path": "odo/backends/csv.py",
"copies": "1",
"size": "9697",
"license": "bsd-3-clause",
"hash": 2656056217653931500,
"line_mean": 30.3818770227,
"line_max": 80,
"alpha_frac": 0.5851294215,
"autogenerated": false,
"ratio": 3.681473044798785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47666024662987855,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import re
import os
import gzip
import bz2
import uuid
import csv
from tempfile import NamedTemporaryFile
from glob import glob
from contextlib import contextmanager
from toolz import concat, keyfilter, keymap, merge, valfilter
import pandas as pd
from dask.threaded import get as dsk_get
import datashape
from datashape import discover, Record, Option
from datashape.predicates import isrecord
from datashape.dispatch import dispatch
from ..compatibility import unicode, PY2
from ..utils import keywords, ext, sample, tmpfile
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..chunks import chunks
from ..temp import Temp
from ..numpy_dtype import dshape_to_pandas
from .pandas import coerce_datetimes
from functools import partial
dialect_terms = '''delimiter doublequote escapechar lineterminator quotechar
quoting skipinitialspace strict'''.split()
aliases = {'sep': 'delimiter'}
class PipeSniffer(csv.Sniffer):
"""A csv sniffer that adds the ``'|'`` character to the list of preferred
delimiters and gives space the least precedence.
Notes
-----
    The :mod:`csv` module has a list of preferred delimiters that will be
returned before the delimiter(s) discovered by parsing the file. This
results in a dialect having an incorrect delimiter if the data contain a
non preferred delimiter and a preferred one. See the Examples section for
an example showing the difference between the two sniffers.
Examples
--------
>>> import csv
>>> data = 'a|b| c|d' # space is preferred but '|' is more common
>>> csv.Sniffer().sniff(data).delimiter # incorrect
' '
>>> PipeSniffer().sniff(data).delimiter # correct
'|'
>>> data = 'a|b|Nov 10, 1981|d'
>>> csv.Sniffer().sniff(data).delimiter # can't handle every case :(
','
"""
def __init__(self, *args, **kwargs):
csv.Sniffer.__init__(self, *args, **kwargs)
self.preferred = [',', '\t', ';', '|', ':', ' ']
def alias(key):
""" Alias kwarg dialect keys to normalized set
>>> alias('sep')
'delimiter'
"""
key = key.lower()
return aliases.get(key, key)
class ModeProxy(object):
"""Gzipped files use int enums for mode so we want to proxy it to provide
The proper string version.
"""
def __init__(self, file, mode):
self._file = file
self.mode = mode
def __getattr__(self, attr):
return getattr(self._file, attr)
def __iter__(self):
return iter(self._file)
@contextmanager
def open_file(path, *args, **kwargs):
f = compressed_open.get(ext(path), open)(path, *args, **kwargs)
try:
yield ModeProxy(f, kwargs.get('mode', 'r'))
finally:
f.close()
def infer_header(path, nbytes=10000, encoding='utf-8', **kwargs):
if encoding is None:
encoding = 'utf-8'
with open_file(path, 'rb') as f:
raw = f.read(nbytes)
if not raw:
return True
sniffer = PipeSniffer()
decoded = raw.decode(encoding, 'replace')
sniffable = decoded.encode(encoding) if PY2 else decoded
try:
return sniffer.has_header(sniffable)
except csv.Error:
return None
def newlines(encoding):
return b'\r\n'.decode(encoding), b'\n'.decode(encoding)
def sniff_dialect(path, nbytes, encoding='utf-8'):
if not os.path.exists(path):
return {}
if encoding is None:
encoding = 'utf-8'
with open_file(path, 'rb') as f:
raw = f.read(nbytes).decode(encoding or 'utf-8', 'replace')
sniffer = PipeSniffer()
try:
dialect = sniffer.sniff(raw, delimiters=sniffer.preferred)
except csv.Error:
return {}
crnl, nl = newlines(encoding)
dialect.lineterminator = crnl if crnl in raw else nl
return dialect_to_dict(dialect)
def dialect_to_dict(dialect):
return dict((name, getattr(dialect, name))
for name in dialect_terms if hasattr(dialect, name))
class NoCloseFile(object):
def __init__(self, buffer):
self._buf = buffer
def __getattr__(self, attr):
return getattr(self._buf, attr)
def close(self):
pass
def __enter__(self):
return self._buf
def __exit__(self, *exc_info):
pass
def __iter__(self):
return iter(self._buf)
class CSV(object):
""" Proxy for a CSV file
Parameters
----------
path : str
Path to file on disk
has_header : bool
If the csv file has a header or not
encoding : str (default utf-8)
File encoding
kwargs : other...
Various choices about dialect
"""
canonical_extension = 'csv'
def __init__(self, path, has_header=None, encoding='utf-8',
sniff_nbytes=10000, buffer=None, **kwargs):
self.path = path
self._has_header = has_header
self.encoding = encoding or 'utf-8'
self._kwargs = kwargs
self._sniff_nbytes = sniff_nbytes
self._buffer = buffer
if path is not None and buffer is not None:
raise ValueError("may only pass one of 'path' or 'buffer'")
if path is None and buffer is None:
raise ValueError("must pass one of 'path' or 'buffer'")
def _sniff_dialect(self, path):
kwargs = self._kwargs
dialect = sniff_dialect(path, self._sniff_nbytes,
encoding=self.encoding)
kwargs = merge(dialect, keymap(alias, kwargs))
return valfilter(lambda x: x is not None,
dict((d, kwargs[d])
for d in dialect_terms if d in kwargs))
@property
def dialect(self):
with sample(self) as fn:
return self._sniff_dialect(fn)
@property
def has_header(self):
if self._has_header is None:
with sample(self) as fn:
with open(fn, mode='rb') as f:
raw = f.read()
self._has_header = not raw or infer_header(fn,
self._sniff_nbytes,
encoding=self.encoding)
return self._has_header
def open(self, mode='rb', **kwargs):
buf = self._buffer
if buf is not None:
return NoCloseFile(buf)
return open_file(self.path, mode=mode, **kwargs)
@sample.register(CSV)
@contextmanager
def sample_csv(csv, length=8192, **kwargs):
if csv.path is None or not os.path.exists(csv.path):
mgr = NamedTemporaryFile(suffix='.csv', mode='wb+')
else:
mgr = csv.open(mode='rb')
with mgr as f:
with tmpfile(csv.canonical_extension) as fn:
with open(fn, mode='wb') as tmpf:
tmpf.write(f.read(length))
yield fn
@append.register(CSV, object)
def append_object_to_csv(c, seq, **kwargs):
append(c, convert(chunks(pd.DataFrame), seq, **kwargs), **kwargs)
return c
compressed_open = {'gz': gzip.open, 'bz2': bz2.BZ2File}
@append.register(CSV, pd.DataFrame)
def append_dataframe_to_csv(c, df, dshape=None, **kwargs):
if not os.path.exists(c.path) or not os.path.getsize(c.path):
has_header = kwargs.pop('header', c.has_header)
else:
has_header = False
sep = kwargs.get('sep',
kwargs.get('delimiter', c.dialect.get('delimiter', ',')))
encoding = kwargs.get('encoding', c.encoding)
if ext(c.path) in compressed_open:
if sys.version_info[0] >= 3:
kwargs['mode'] = 'at'
kwargs['encoding'] = encoding
elif sys.version_info[0] == 2:
kwargs['mode'] = 'ab' if sys.platform == 'win32' else 'at'
with c.open(mode=kwargs.get('mode', 'a')) as f:
df.to_csv(f,
header=has_header,
index=False,
sep=sep,
encoding=encoding)
return c
@append.register(CSV, chunks(pd.DataFrame))
def append_iterator_to_csv(c, cs, **kwargs):
for chunk in cs:
append(c, chunk, **kwargs)
return c
@convert.register(pd.DataFrame, (Temp(CSV), CSV), cost=20.0)
def csv_to_dataframe(c, dshape=None, chunksize=None, nrows=None, **kwargs):
try:
return _csv_to_dataframe(c, dshape=dshape, chunksize=chunksize,
nrows=nrows, **kwargs)
except StopIteration:
if nrows:
return _csv_to_dataframe(c, dshape=dshape, chunksize=chunksize,
**kwargs)
else:
raise
def _csv_to_dataframe(c, dshape=None, chunksize=None, **kwargs):
header = {False: None, True: 0}.get(
kwargs.pop('has_header', c.has_header), 'infer')
sep = kwargs.pop(
'sep', kwargs.pop('delimiter', c.dialect.get('delimiter', ',')))
encoding = kwargs.pop('encoding', c.encoding)
if dshape:
dtypes, parse_dates = dshape_to_pandas(dshape)
if isrecord(dshape.measure):
names = kwargs.get('names', dshape.measure.names)
else:
names = kwargs.get('names')
else:
dtypes = parse_dates = names = None
usecols = kwargs.pop('usecols', None)
if parse_dates and usecols:
parse_dates = [col for col in parse_dates if col in usecols]
# See read_csv docs for header for reasoning
if names:
try:
with c.open() as f:
found_names = pd.read_csv(f,
nrows=1,
encoding=encoding,
sep=sep)
except StopIteration:
with c.open() as f:
found_names = pd.read_csv(f, encoding=encoding, sep=sep)
if names and header == 'infer':
if [n.strip() for n in found_names] == [n.strip() for n in names]:
header = 0
elif (all(re.match('^\s*\D\w*\s*$', n) for n in found_names) and
not all(dt == datashape.string for dt in dshape.measure.types)):
header = 0
else:
header = None
kwargs = keyfilter(keywords(pd.read_csv).__contains__, kwargs)
with c.open() as f:
return pd.read_csv(f,
header=header,
sep=sep,
encoding=encoding,
dtype=dtypes,
parse_dates=parse_dates,
names=names,
chunksize=chunksize,
usecols=usecols,
**kwargs)
@convert.register(chunks(pd.DataFrame), (Temp(CSV), CSV), cost=10.0)
def CSV_to_chunks_of_dataframes(c, chunksize=2 ** 20, **kwargs):
# Load a small 1000 line DF to start
# This helps with rapid viewing of a large CSV file
first = csv_to_dataframe(c, nrows=1000, **kwargs)
if len(first) == 1000:
rest = csv_to_dataframe(
c, chunksize=chunksize, skiprows=1000, **kwargs)
else:
rest = []
data = [first] + rest
return chunks(pd.DataFrame)(data)
@discover.register(CSV)
def discover_csv(c, nrows=1000, **kwargs):
df = csv_to_dataframe(c, nrows=nrows, **kwargs)
df = coerce_datetimes(df)
columns = [str(c) if not isinstance(c, (str, unicode)) else c
for c in df.columns]
df.columns = [c.strip() for c in columns]
# Replace np.nan with None. Forces type string rather than float
for col in df.columns:
if not df[col].count():
df[col] = None
measure = discover(df).measure
# Use Series.notnull to determine Option-ness
measure = Record([[name, Option(typ)
if df[name].isnull().any() and
not isinstance(typ, Option) else typ]
for name, typ in zip(measure.names, measure.types)])
return datashape.var * measure
@resource.register('.+\.(csv|tsv|ssv|data|dat)(\.gz|\.bz2?)?')
def resource_csv(uri, **kwargs):
return CSV(uri, **kwargs)
@resource.register('.*\*.+', priority=12)
def resource_glob(uri, **kwargs):
filenames = sorted(glob(uri))
r = resource(filenames[0], **kwargs)
return chunks(type(r))([resource(u, **kwargs) for u in sorted(glob(uri))])
@convert.register(chunks(pd.DataFrame), (chunks(CSV), chunks(Temp(CSV))),
cost=10.0)
def convert_glob_of_csvs_to_chunks_of_dataframes(csvs, **kwargs):
f = partial(convert, chunks(pd.DataFrame), **kwargs)
def df_gen():
# build up a dask graph to run all of the `convert` calls concurrently
# use a list to hold the requested key names to ensure that we return
# the results in the correct order
p = []
dsk = {}
for n, csv_ in enumerate(csvs):
key = 'p%d' % n
dsk[key] = f, csv_
p.append(key)
return concat(dsk_get(dsk, p))
return chunks(pd.DataFrame)(df_gen)
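# Illustrative shape of the dask graph built by df_gen above (not from the
# upstream source); csv0/csv1 stand in for two CSV objects from `csvs`:
#
#     dsk = {'p0': (f, csv0), 'p1': (f, csv1)}
#     p = ['p0', 'p1']
#     dsk_get(dsk, p)  # runs the converts on dask's threaded scheduler and
#                      # returns their results in the order given by `p`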
@convert.register(Temp(CSV), (pd.DataFrame, chunks(pd.DataFrame)))
def convert_dataframes_to_temporary_csv(data, **kwargs):
fn = '.%s.csv' % uuid.uuid1()
csv = Temp(CSV)(fn, **kwargs)
return append(csv, data, **kwargs)
@dispatch(CSV)
def drop(c):
if c.path is not None:
os.unlink(c.path)
ooc_types.add(CSV)
| {
"repo_name": "quantopian/odo",
"path": "odo/backends/csv.py",
"copies": "4",
"size": "13422",
"license": "bsd-3-clause",
"hash": 9091240728959497000,
"line_mean": 29.0268456376,
"line_max": 82,
"alpha_frac": 0.5815824765,
"autogenerated": false,
"ratio": 3.758611033323999,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00033695070192381804,
"num_lines": 447
} |
from __future__ import absolute_import, division, print_function
import sys
import re
import os
import gzip
import bz2
import uuid
import csv
from glob import glob
from contextlib import contextmanager
from toolz import concat, keyfilter, keymap, merge, valfilter
import pandas as pd
import datashape
from datashape import discover, Record, Option
from datashape.predicates import isrecord
from datashape.dispatch import dispatch
from ..compatibility import unicode, PY2
from ..utils import keywords, ext, sample, tmpfile
from ..append import append
from ..convert import convert, ooc_types
from ..resource import resource
from ..chunks import chunks
from ..temp import Temp
from ..numpy_dtype import dshape_to_pandas
from .pandas import coerce_datetimes
dialect_terms = '''delimiter doublequote escapechar lineterminator quotechar
quoting skipinitialspace strict'''.split()
aliases = {'sep': 'delimiter'}
class PipeSniffer(csv.Sniffer):
"""A csv sniffer that adds the ``'|'`` character to the list of preferred
delimiters and gives space the least precedence.
Notes
-----
    The :mod:`csv` module has a list of preferred delimiters that will be
returned before the delimiter(s) discovered by parsing the file. This
results in a dialect having an incorrect delimiter if the data contain a
non preferred delimiter and a preferred one. See the Examples section for
an example showing the difference between the two sniffers.
Examples
--------
>>> import csv
>>> data = 'a|b| c|d' # space is preferred but '|' is more common
>>> csv.Sniffer().sniff(data).delimiter # incorrect
' '
>>> PipeSniffer().sniff(data).delimiter # correct
'|'
>>> data = 'a|b|Nov 10, 1981|d'
>>> csv.Sniffer().sniff(data).delimiter # can't handle every case :(
','
"""
def __init__(self, *args, **kwargs):
csv.Sniffer.__init__(self, *args, **kwargs)
self.preferred = [',', '\t', ';', '|', ':', ' ']
def alias(key):
""" Alias kwarg dialect keys to normalized set
>>> alias('sep')
'delimiter'
"""
key = key.lower()
return aliases.get(key, key)
@contextmanager
def open_file(path, *args, **kwargs):
f = compressed_open.get(ext(path), open)(path, *args, **kwargs)
try:
yield f
finally:
f.close()
def infer_header(path, nbytes=10000, encoding='utf-8', **kwargs):
if encoding is None:
encoding = 'utf-8'
with open_file(path, 'rb') as f:
raw = f.read(nbytes)
if not raw:
return True
sniffer = PipeSniffer()
decoded = raw.decode(encoding, 'replace')
sniffable = decoded.encode(encoding) if PY2 else decoded
try:
return sniffer.has_header(sniffable)
except csv.Error:
return None
def newlines(encoding):
return b'\r\n'.decode(encoding), b'\n'.decode(encoding)
def sniff_dialect(path, nbytes, encoding='utf-8'):
if not os.path.exists(path):
return {}
if encoding is None:
encoding = 'utf-8'
with open_file(path, 'rb') as f:
raw = f.read(nbytes).decode(encoding or 'utf-8', 'replace')
sniffer = PipeSniffer()
try:
dialect = sniffer.sniff(raw, delimiters=sniffer.preferred)
except csv.Error:
return {}
crnl, nl = newlines(encoding)
dialect.lineterminator = crnl if crnl in raw else nl
return dialect_to_dict(dialect)
def dialect_to_dict(dialect):
return dict((name, getattr(dialect, name))
for name in dialect_terms if hasattr(dialect, name))
class CSV(object):
""" Proxy for a CSV file
Parameters
----------
path : str
Path to file on disk
has_header : bool
If the csv file has a header or not
encoding : str (default utf-8)
File encoding
kwargs : other...
Various choices about dialect
"""
canonical_extension = 'csv'
def __init__(self, path, has_header=None, encoding='utf-8',
sniff_nbytes=10000, **kwargs):
self.path = path
self._has_header = has_header
self.encoding = encoding or 'utf-8'
self._kwargs = kwargs
self._sniff_nbytes = sniff_nbytes
def _sniff_dialect(self, path):
kwargs = self._kwargs
dialect = sniff_dialect(path, self._sniff_nbytes,
encoding=self.encoding)
kwargs = merge(dialect, keymap(alias, kwargs))
return valfilter(lambda x: x is not None,
dict((d, kwargs[d])
for d in dialect_terms if d in kwargs))
@property
def dialect(self):
with sample(self) as fn:
return self._sniff_dialect(fn)
@property
def has_header(self):
if self._has_header is None:
with sample(self) as fn:
with open(fn, mode='rb') as f:
raw = f.read()
self._has_header = not raw or infer_header(fn,
self._sniff_nbytes,
encoding=self.encoding)
return self._has_header
@sample.register(CSV)
@contextmanager
def sample_csv(csv, length=8192, **kwargs):
should_delete = not os.path.exists(csv.path)
if should_delete:
with open_file(csv.path, mode='wb'):
pass
try:
with open_file(csv.path, mode='rb') as f:
with tmpfile(csv.canonical_extension) as fn:
with open(fn, mode='wb') as tmpf:
tmpf.write(f.read(length))
yield fn
finally:
if should_delete:
os.remove(csv.path)
@append.register(CSV, object)
def append_object_to_csv(c, seq, **kwargs):
append(c, convert(chunks(pd.DataFrame), seq, **kwargs), **kwargs)
return c
compressed_open = {'gz': gzip.open, 'bz2': bz2.BZ2File}
@append.register(CSV, pd.DataFrame)
def append_dataframe_to_csv(c, df, dshape=None, **kwargs):
if not os.path.exists(c.path) or not os.path.getsize(c.path):
has_header = kwargs.pop('header', c.has_header)
else:
has_header = False
sep = kwargs.get('sep',
kwargs.get('delimiter', c.dialect.get('delimiter', ',')))
encoding = kwargs.get('encoding', c.encoding)
if ext(c.path) in compressed_open:
if sys.version_info[0] >= 3:
kwargs['mode'] = 'at'
kwargs['encoding'] = encoding
elif sys.version_info[0] == 2:
kwargs['mode'] = 'ab' if sys.platform == 'win32' else 'at'
f = compressed_open[ext(c.path)](c.path, **kwargs)
else:
f = c.path
try:
df.to_csv(f, mode='a', header=has_header, index=False, sep=sep,
encoding=encoding)
finally:
if hasattr(f, 'close'):
f.close()
return c
@append.register(CSV, chunks(pd.DataFrame))
def append_iterator_to_csv(c, cs, **kwargs):
for chunk in cs:
append(c, chunk, **kwargs)
return c
@convert.register(pd.DataFrame, (Temp(CSV), CSV), cost=20.0)
def csv_to_dataframe(c, dshape=None, chunksize=None, nrows=None, **kwargs):
try:
return _csv_to_dataframe(c, dshape=dshape, chunksize=chunksize,
nrows=nrows, **kwargs)
except StopIteration:
if nrows:
return _csv_to_dataframe(c, dshape=dshape, chunksize=chunksize,
**kwargs)
else:
raise
def _csv_to_dataframe(c, dshape=None, chunksize=None, **kwargs):
header = {False: None, True: 0}.get(
kwargs.pop('has_header', c.has_header), 'infer')
sep = kwargs.pop(
'sep', kwargs.pop('delimiter', c.dialect.get('delimiter', ',')))
encoding = kwargs.pop('encoding', c.encoding)
if dshape:
dtypes, parse_dates = dshape_to_pandas(dshape)
if isrecord(dshape.measure):
names = kwargs.get('names', dshape.measure.names)
else:
names = kwargs.get('names')
else:
dtypes = parse_dates = names = None
usecols = kwargs.pop('usecols', None)
if parse_dates and usecols:
parse_dates = [col for col in parse_dates if col in usecols]
compression = kwargs.pop('compression',
{'gz': 'gzip', 'bz2': 'bz2'}.get(ext(c.path)))
# See read_csv docs for header for reasoning
if names:
try:
found_names = pd.read_csv(c.path, encoding=encoding,
compression=compression, nrows=1)
except StopIteration:
found_names = pd.read_csv(c.path, encoding=encoding,
compression=compression)
if names and header == 'infer':
if [n.strip() for n in found_names] == [n.strip() for n in names]:
header = 0
elif (all(re.match('^\s*\D\w*\s*$', n) for n in found_names) and
not all(dt == datashape.string for dt in dshape.measure.types)):
header = 0
else:
header = None
kwargs = keyfilter(keywords(pd.read_csv).__contains__, kwargs)
return pd.read_csv(c.path,
header=header,
sep=sep,
encoding=encoding,
dtype=dtypes,
parse_dates=parse_dates,
names=names,
compression=compression,
chunksize=chunksize,
usecols=usecols,
**kwargs)
@convert.register(chunks(pd.DataFrame), (Temp(CSV), CSV), cost=10.0)
def CSV_to_chunks_of_dataframes(c, chunksize=2 ** 20, **kwargs):
# Load a small 1000 line DF to start
# This helps with rapid viewing of a large CSV file
first = csv_to_dataframe(c, nrows=1000, **kwargs)
if len(first) == 1000:
rest = csv_to_dataframe(
c, chunksize=chunksize, skiprows=1000, **kwargs)
else:
rest = []
def _():
yield first
for df in rest:
yield df
return chunks(pd.DataFrame)(_)
@discover.register(CSV)
def discover_csv(c, nrows=1000, **kwargs):
df = csv_to_dataframe(c, nrows=nrows, **kwargs)
df = coerce_datetimes(df)
if (list(df.columns) != list(range(len(df.columns))) and
any(re.match(r'^[-\d_]*$', c) is not None for c in df.columns)):
df = csv_to_dataframe(c, chunksize=50, has_header=False).get_chunk()
df = coerce_datetimes(df)
columns = [str(c) if not isinstance(c, (str, unicode)) else c
for c in df.columns]
df.columns = [c.strip() for c in columns]
# Replace np.nan with None. Forces type string rather than float
for col in df.columns:
if not df[col].count():
df[col] = None
measure = discover(df).measure
# Use Series.notnull to determine Option-ness
measure = Record([[name, Option(typ)
if df[name].isnull().any() and
not isinstance(typ, Option) else typ]
for name, typ in zip(measure.names, measure.types)])
return datashape.var * measure
@resource.register('.+\.(csv|tsv|ssv|data|dat)(\.gz|\.bz2?)?')
def resource_csv(uri, **kwargs):
return CSV(uri, **kwargs)
@resource.register('.*\*.+', priority=12)
def resource_glob(uri, **kwargs):
filenames = sorted(glob(uri))
r = resource(filenames[0], **kwargs)
return chunks(type(r))([resource(u, **kwargs) for u in sorted(glob(uri))])
@convert.register(chunks(pd.DataFrame), (chunks(CSV), chunks(Temp(CSV))),
cost=10.0)
def convert_glob_of_csvs_to_chunks_of_dataframes(csvs, **kwargs):
def _():
return concat(convert(chunks(pd.DataFrame), csv, **kwargs)
for csv in csvs)
return chunks(pd.DataFrame)(_)
@convert.register(Temp(CSV), (pd.DataFrame, chunks(pd.DataFrame)))
def convert_dataframes_to_temporary_csv(data, **kwargs):
fn = '.%s.csv' % uuid.uuid1()
csv = Temp(CSV)(fn, **kwargs)
return append(csv, data, **kwargs)
@dispatch(CSV)
def drop(c):
os.unlink(c.path)
ooc_types.add(CSV)
| {
"repo_name": "cpcloud/odo",
"path": "odo/backends/csv.py",
"copies": "1",
"size": "12189",
"license": "bsd-3-clause",
"hash": -199809839448757820,
"line_mean": 29.9365482234,
"line_max": 82,
"alpha_frac": 0.5850356879,
"autogenerated": false,
"ratio": 3.7082445999391545,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9791368905050323,
"avg_score": 0.00038227655776636204,
"num_lines": 394
} |
from __future__ import absolute_import, division, print_function
import sys
import re
import subprocess
from os.path import join, basename
from conda_build.conda_interface import memoized
from conda_build.conda_interface import untracked
from conda_build.conda_interface import linked_data
from conda_build.os_utils.macho import otool
from conda_build.os_utils.pyldd import codefile_class, inspect_linkages, machofile, is_codefile
LDD_RE = re.compile(r'\s*(.*?)\s*=>\s*(.*?)\s*\(.*\)')
LDD_NOT_FOUND_RE = re.compile(r'\s*(.*?)\s*=>\s*not found')
def ldd(path):
"thin wrapper around ldd"
lines = subprocess.check_output(['ldd', path]).decode('utf-8').splitlines()
res = []
for line in lines:
if '=>' not in line:
continue
assert line[0] == '\t', (path, line)
m = LDD_RE.match(line)
if m:
res.append(m.groups())
continue
m = LDD_NOT_FOUND_RE.match(line)
if m:
res.append((m.group(1), 'not found'))
continue
if 'ld-linux' in line:
continue
raise RuntimeError("Unexpected output from ldd: %s" % line)
return res
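# Illustrative parse (hypothetical library names) of typical `ldd` output:
#
#     "\tlibz.so.1 => /lib/x86_64-linux-gnu/libz.so.1 (0x00007f...)"
#         -> ('libz.so.1', '/lib/x86_64-linux-gnu/libz.so.1')
#     "\tlibmissing.so.2 => not found"
#         -> ('libmissing.so.2', 'not found')
#
# Lines without '=>' and the ld-linux loader entry are skipped.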
@memoized
def get_linkages(obj_files, prefix, sysroot):
res = {}
for f in obj_files:
path = join(prefix, f)
# ldd quite often fails on foreign architectures.
ldd_failed = False
# Detect the filetype to emulate what the system-native tool does.
klass = codefile_class(path)
if klass == machofile:
resolve_filenames = False
recurse = False
else:
resolve_filenames = True
recurse = True
try:
if sys.platform.startswith('linux'):
res[f] = ldd(path)
elif sys.platform.startswith('darwin'):
links = otool(path)
res[f] = [(basename(l['name']), l['name']) for l in links]
except:
ldd_failed = True
finally:
res_py = inspect_linkages(path, resolve_filenames=resolve_filenames,
sysroot=sysroot, recurse=recurse)
res_py = [(basename(lp), lp) for lp in res_py]
if ldd_failed:
res[f] = res_py
else:
if set(res[f]) != set(res_py):
print("WARNING: pyldd disagrees with ldd/otool. This will not cause any")
print("WARNING: problems for this build, but please file a bug at:")
print("WARNING: https://github.com/conda/conda-build")
print("WARNING: and (if possible) attach file {}".format(path))
print("WARNING: \nldd/otool gives:\n{}\npyldd gives:\n{}\n"
.format("\n".join(str(e) for e in res[f]), "\n".join(str(e)
for e in res_py)))
print("Diffs\n{}".format(set(res[f]) - set(res_py)))
print("Diffs\n{}".format(set(res_py) - set(res[f])))
return res
@memoized
def get_package_obj_files(dist, prefix):
data = linked_data(prefix).get(dist)
res = []
if data:
for f in data.get('files', []):
path = join(prefix, f)
if is_codefile(path):
res.append(f)
return res
@memoized
def get_untracked_obj_files(prefix):
res = []
files = untracked(prefix)
for f in files:
path = join(prefix, f)
if is_codefile(path):
res.append(f)
return res
| {
"repo_name": "pelson/conda-build",
"path": "conda_build/os_utils/ldd.py",
"copies": "4",
"size": "3589",
"license": "bsd-3-clause",
"hash": 6148140749766158000,
"line_mean": 31.3333333333,
"line_max": 97,
"alpha_frac": 0.5377542491,
"autogenerated": false,
"ratio": 3.730769230769231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.626852347986923,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import re
import subprocess
import json
from os.path import join, basename
from conda.utils import memoized
from conda.misc import untracked
from conda_build import post
from conda_build.macho import otool
LDD_RE = re.compile(r'\s*(.*?)\s*=>\s*(.*?)\s*\(.*\)')
LDD_NOT_FOUND_RE = re.compile(r'\s*(.*?)\s*=>\s*not found')
def ldd(path):
"thin wrapper around ldd"
lines = subprocess.check_output(['ldd', path]).decode('utf-8').splitlines()
res = []
for line in lines:
if '=>' not in line:
continue
assert line[0] == '\t', (path, line)
m = LDD_RE.match(line)
if m:
res.append(m.groups())
continue
m = LDD_NOT_FOUND_RE.match(line)
if m:
res.append((m.group(1), 'not found'))
continue
if 'ld-linux' in line:
continue
raise RuntimeError("Unexpected output from ldd: %s" % line)
return res
@memoized
def get_linkages(obj_files, prefix):
res = {}
for f in obj_files:
path = join(prefix, f)
if sys.platform.startswith('linux'):
res[f] = ldd(path)
elif sys.platform.startswith('darwin'):
links = otool(path)
res[f] = [(basename(l), l) for l in links]
return res
@memoized
def get_package_obj_files(dist, prefix):
with open(join(prefix, 'conda-meta', dist +
'.json')) as f:
data = json.load(f)
res = []
files = data['files']
for f in files:
path = join(prefix, f)
if post.is_obj(path):
res.append(f)
return res
@memoized
def get_untracked_obj_files(prefix):
res = []
files = untracked(prefix)
for f in files:
path = join(prefix, f)
if post.is_obj(path):
res.append(f)
return res
| {
"repo_name": "rmcgibbo/conda-build",
"path": "conda_build/ldd.py",
"copies": "8",
"size": "1895",
"license": "bsd-3-clause",
"hash": 1919287085528393700,
"line_mean": 22.9873417722,
"line_max": 79,
"alpha_frac": 0.5667546174,
"autogenerated": false,
"ratio": 3.3718861209964412,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7938640738396442,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import string
import llvm.core as lc
from llvm import LLVMException
from .. import llvm_array as lla
from .blaze_kernels import BlazeElementKernel, refresh_name
from . import blaze_kernels
from blaze.py2help import _strtypes
def letters(source=string.ascii_lowercase):
k = 0
while 1:
for a in source:
yield a+str(k) if k else a
k = k+1
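# For illustration: the stream yields 'a', 'b', ..., 'z', then 'a1', 'b1',
# ..., 'z1', 'a2', ..., so each generated node/kernel name is unique.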
# An Argument to a kernel tree (encapsulates the array, argument kind and rank)
# FIXME --- perhaps we should have _llvmtype be a SCALAR kind or a string that gets converted
# to the correct llvmtype when needed.
class Argument(object):
_shape = None
def __init__(self, dshape, kind, rank, llvmtype):
self.dshape = dshape
if isinstance(kind, tuple):
kind = kind[0]
self.kind = kind
self.rank = rank
self._llvmtype = llvmtype
#def __eq__(self, other):
# if not isinstance(other, Argument):
# return NotImplemented
# # FIXME: Should remove kind check and cast different kinds
# # in the generated code.
# return ((self.dshape is other.dshape) and
# (self.rank == other.rank) and
# (self.kind==other.kind))
# FIXME:
# Because module linking destroys struct names we need to store
# a stringified version of the element type and then
# convert as needed...
@property
def llvmtype(self):
self._llvmtype = refresh_name(self._llvmtype)
return self._llvmtype
#@property
#def shape(self):
# if self._shape is None:
# if self.rank == 0:
# self._shape = ()
# else:
# self._shape = self.arr.dshape.shape[-self.rank:]
# return self._shape
def get_kernel_dshape(self):
"""Returns the kernel data-shape of the argument."""
rank = self.rank
total_dshape = self.dshape
sub = len(total_dshape)-1-rank
return total_dshape.subarray(sub)
# A KernelTree is just the bare element-wise kernel functions
# (no arguments). Any arguments are identified as unique-names
# in an abstract name-space
# All nodes in the kernel tree can also be named
# or else a unique-name
# from the abstract name-space will be created.
# Each KernelTree has a single llvm module name-space
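# Illustrative shape (hypothetical kernels/arguments) for the expression used
# in fuse_kerneltree's docstring, add(multiply(b, c), subtract(d, f)):
#
#     KernelTree(add_kernel,
#                [KernelTree(multiply_kernel, [arg_b, arg_c]),
#                 KernelTree(subtract_kernel, [arg_d, arg_f])])
#
# where the *_kernel objects are BlazeElementKernel instances and the arg_*
# objects are Arguments wrapping the inputs.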
class KernelTree(object):
_stream_of_unique_names = letters()
_stream_of_unique_kernels = letters()
_fused = None
_funcptr = None
_ctypes = None
_mark = False
_shape = None
def __init__(self, kernel, children=[], name=None):
assert isinstance(kernel, BlazeElementKernel)
for el in children:
assert isinstance(el, (KernelTree, Argument))
self.kernel = kernel
self.children = children
if name is None:
name = 'node_' + next(self._stream_of_unique_names)
self.name = name
def _reset_marks(self):
self._mark = False
if not self.leafnode:
for child in self.children:
if isinstance(child, KernelTree):
child._reset_marks()
def sorted_nodes(self):
"""Return depth-first list of unique KernelTree Nodes.
The root of the tree will be the last node.
"""
nodes = []
self._reset_marks()
self.visit(nodes)
return nodes
@property
def shape(self):
if self._shape is None:
shapeargs = [child.shape for child in self.children]
self._shape = self.kernel.shapefunc(*shapeargs)
return self._shape
@property
def leafnode(self):
return all(isinstance(child, Argument) for child in self.children)
def visit(self, nodes):
if not self.leafnode:
for child in self.children:
if isinstance(child, KernelTree):
child.visit(nodes)
if not self._mark:
nodes.append(self)
self._mark = True
def fuse(self):
if self._fused is not None:
return self._fused
# Even if self is a leaf node (self.leafnode is True), do
# this processing, so as to consistently combine repeated
# arguments.
krnlobj, children = fuse_kerneltree(self, self.kernel.module)
new = KernelTree(krnlobj, children)
self._update_kernelptrs(new)
return new
def _update_kernelptrs(self, eltree):
self._fused = eltree
kernel = eltree.kernel
self._funcptr = self._funcptr or kernel.func_ptr
self._ctypes = self._ctypes or kernel.ctypes_func
@property
def func_ptr(self):
if self._funcptr is None:
self.fuse()
return self._funcptr
@property
def ctypes_func(self):
if self._ctypes is None:
self.fuse()
return self._ctypes
def make_ckernel_deferred(self, out_dshape):
return self.fuse().kernel.make_ckernel_deferred(out_dshape)
def __str__(self):
pre = self.name + '('
post = ')'
strs = []
for child in self.children:
if isinstance(child, Argument):
strs.append('<arg>')
else:
strs.append(str(child))
body = ",".join(strs)
return pre + body + post
class _cleanup(object):
def __init__(self, builder, freefunc, freedata):
self.freefunc = freefunc
self.freedata = freedata
self.builder = builder
def _dealloc(self):
self.builder.call(self.freefunc, [self.freedata])
# This modifies the node to add a reference the output as llvm_obj
def insert_instructions(node, builder, output=None):
kernel = node.kernel
is_scalar = (kernel.kinds[-1] == lla.SCALAR)
#allocate space for output if necessary
new = None
if output is None:
if kernel.kinds[-1] == lla.POINTER:
output = builder.alloca(kernel.argtypes[-1].pointee)
elif not is_scalar: # Array
kind = kernel.kinds[-1][0]
eltype = kernel.argtypes[-1].pointee.elements[0].pointee
assert node.shape is not None
assert kernel.argtypes[-1].pointee.elements[1].count == len(node.shape)
output, freefunc, freedata = lla.create_array(
builder, node.shape, kind, eltype)
new = _cleanup(builder, freefunc, freedata)
#Setup the argument list
args = [child.llvm_obj for child in node.children]
if not is_scalar:
args.append(output)
# call the kernel corresponding to this node
# bitcast any arguments that don't match the kernel.function type
# for array types and pointer types... Needed because inputs might
# be from different compilers...
newargs = []
kfunc_args = kernel.func.type.pointee.args
for kind, oldarg, needed_type in zip(kernel.kinds, args, kfunc_args):
newarg = oldarg
if (kind != lla.SCALAR) and (needed_type != oldarg.type):
newarg = builder.bitcast(oldarg, needed_type)
newargs.append(newarg)
res = builder.call(kernel.func, newargs)
assert kernel.func.module is builder.basic_block.function.module
if is_scalar:
node.llvm_obj = res
else:
node.llvm_obj = output
return new
# This also replaces arguments with the unique argument in the kernel tree
def find_unique_args(tree, unique_args):
for i, element in enumerate(tree.children):
if isinstance(element, Argument):
try:
index = unique_args.index(element)
except ValueError: # not found
unique_args.append(element)
else:
tree.children[i] = unique_args[index]
else:
find_unique_args(element, unique_args)
def get_fused_type(tree):
"""Get the function type of the compound kernel
"""
outkrn = tree.kernel
# If this is not a SCALAR then we need to attach another node
out_kind = outkrn.kinds[-1]
out_type = outkrn.func.type.pointee.return_type
unique_args = []
find_unique_args(tree, unique_args)
args = [arg.llvmtype for arg in unique_args]
if out_kind != lla.SCALAR:
args.append(outkrn.argtypes[-1])
return unique_args, lc.Type.function(out_type, args)
def fuse_kerneltree(tree, module_or_name):
"""Fuse the kernel tree into a single kernel object with the common names
Examples:
add(multiply(b,c),subtract(d,f))
var tmp0 = multiply(b,c)
var tmp1 = subtract(d,f)
return add(tmp0, tmp1)
var tmp0;
var tmp1;
multiply(b,c,&tmp0)
subtract(d,f,&tmp1)
add(tmp0, tmp1, &res)
"""
if isinstance(module_or_name, _strtypes):
        module = lc.Module.new(module_or_name)
else:
module = module_or_name
args, func_type = get_fused_type(tree)
outdshape = tree.kernel.dshapes[-1]
try:
func = module.get_function_named(tree.name+"_fused")
except LLVMException:
func = lc.Function.new(module, func_type, tree.name+"_fused")
block = func.append_basic_block('entry')
builder = lc.Builder.new(block)
# TODO: Create wrapped function for functions
# that need to loop over their inputs
# Attach the llvm_object to the Argument objects
for i, arg in enumerate(args):
arg.llvm_obj = func.args[i]
# topologically sort the kernel-tree nodes and then for each node
# site we issue instructions to compute the value
nodelist = tree.sorted_nodes()
    cleanup = []  # Objects to deallocate any temporary heap memory needed;
                  # must have a _dealloc method
def _temp_cleanup():
for obj in cleanup:
if obj is not None:
obj._dealloc()
#import pdb
#pdb.set_trace()
for node in nodelist[:-1]:
node.kernel.attach(module)
new = insert_instructions(node, builder)
cleanup.append(new)
nodelist[-1].kernel.attach(module)
if tree.kernel.kinds[-1] == lla.SCALAR:
new = insert_instructions(nodelist[-1], builder)
cleanup.append(new)
_temp_cleanup()
builder.ret(nodelist[-1].llvm_obj)
else:
new = insert_instructions(nodelist[-1], builder, func.args[-1])
cleanup.append(new)
_temp_cleanup()
builder.ret_void()
dshapes = [arg.get_kernel_dshape() for arg in args]
dshapes.append(outdshape)
newkernel = BlazeElementKernel(func, dshapes)
return newkernel, args
| {
"repo_name": "zeeshanali/blaze",
"path": "blaze/compute/bkernel/kernel_tree.py",
"copies": "2",
"size": "10731",
"license": "bsd-3-clause",
"hash": 3762502417649541600,
"line_mean": 30.4692082111,
"line_max": 93,
"alpha_frac": 0.6034852297,
"autogenerated": false,
"ratio": 3.8421052631578947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005245098804009226,
"num_lines": 341
} |
from __future__ import absolute_import, division, print_function
import sys
import subprocess
from os.path import islink, isfile
from conda_build import utils
NO_EXT = (
'.py', '.pyc', '.pyo', '.h', '.a', '.c', '.txt', '.html',
'.xml', '.png', '.jpg', '.gif', '.class',
)
MAGIC = {
b'\xca\xfe\xba\xbe': 'MachO-universal',
b'\xce\xfa\xed\xfe': 'MachO-i386',
b'\xcf\xfa\xed\xfe': 'MachO-x86_64',
b'\xfe\xed\xfa\xce': 'MachO-ppc',
b'\xfe\xed\xfa\xcf': 'MachO-ppc64',
}
FILETYPE = {
1: 'MH_OBJECT',
2: 'MH_EXECUTE',
3: 'MH_FVMLIB',
4: 'MH_CORE',
5: 'MH_PRELOAD',
6: 'MH_DYLIB',
7: 'MH_DYLINKER',
8: 'MH_BUNDLE',
9: 'MH_DYLIB_STUB',
10: 'MH_DSYM',
11: 'MH_KEXT_BUNDLE',
}
def is_macho(path):
if path.endswith(NO_EXT) or islink(path) or not isfile(path):
return False
with open(path, 'rb') as fi:
head = fi.read(4)
return bool(head in MAGIC)
def is_dylib(path):
return human_filetype(path) == 'DYLIB'
def human_filetype(path):
output, _ = utils.execute(['otool', '-h', path], check_exit_code=True)
lines = output.splitlines()
assert lines[0].startswith(path), path
for line in lines:
if line.strip().startswith('0x'):
header = line.split()
filetype = int(header[4])
return FILETYPE[filetype][3:]
def otool(path):
"thin wrapper around otool -L"
output, _ = utils.execute(['otool', '-L', path], check_exit_code=True)
lines = output.splitlines()
assert lines[0].startswith(path), path
res = []
for line in lines[1:]:
assert line[0] == '\t', path
res.append(line.split()[0])
return res
def get_rpaths(path):
output, _ = utils.execute(['otool', '-l', path], check_exit_code=True)
lines = output.splitlines()
check_for_rpath = False
rpaths = []
for line in lines:
if 'cmd LC_RPATH' in line:
check_for_rpath = True
if check_for_rpath and 'path' in line:
_, rpath, _ = line.split(None, 2)
rpaths.append(rpath)
return rpaths
def install_name_change(path, cb_func):
"""
change dynamic shared library install names of Mach-O binary `path`.
    `cb_func` is a callback function invoked for each shared library name.
    It is called with `path` and the current shared library install name,
    and returns the new name (or None if the name should be unchanged).
"""
changes = []
for link in otool(path):
# The first link may be the install name of the library itself, but
# this isn't a big deal because install_name_tool -change is a no-op
# if given a dependent install name that doesn't exist.
new_link = cb_func(path, link)
if new_link:
changes.append((link, new_link))
ret = True
for old, new in changes:
return_code = 0
args = ['install_name_tool', '-change', old, new, path]
print(' '.join(args))
try:
stdout, stderr = utils.execute(args, check_exit_code=True)
except subprocess.CalledProcessError as exc:
stdout, stderr = exc.output
return_code = exc.return_code
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s" % path)
ret = False
continue
else:
print(stderr, file=sys.stderr)
if return_code:
raise RuntimeError("install_name_tool failed with exit "
"status %d" % return_code)
return ret
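# Hypothetical callback sketch showing the contract install_name_change
# expects; the prefix paths below are placeholders.
def _example_rpath_callback(path, link):
    # Rewrite links that point into an old build prefix to use @rpath;
    # returning None leaves a link unchanged.
    old_prefix = '/opt/old-prefix/lib/'
    if link.startswith(old_prefix):
        return '@rpath/' + link.rsplit('/', 1)[-1]
    return None
# e.g. install_name_change('/opt/new-prefix/lib/libfoo.dylib',
#                          _example_rpath_callback)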
if __name__ == '__main__':
if sys.platform == 'darwin':
for path in '/bin/ls', '/etc/locate.rc':
print(path, is_macho(path))
| {
"repo_name": "rmcgibbo/conda-build",
"path": "conda_build/macho.py",
"copies": "2",
"size": "3780",
"license": "bsd-3-clause",
"hash": 5980538053174119000,
"line_mean": 28.3023255814,
"line_max": 79,
"alpha_frac": 0.5788359788,
"autogenerated": false,
"ratio": 3.402340234023402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9973239133351894,
"avg_score": 0.0015874158943014393,
"num_lines": 129
} |
from __future__ import absolute_import, division, print_function
import sys
import subprocess
from os.path import islink, isfile
NO_EXT = (
'.py', '.pyc', '.pyo', '.h', '.a', '.c', '.txt', '.html',
'.xml', '.png', '.jpg', '.gif', '.class',
)
MAGIC = {
b'\xca\xfe\xba\xbe': 'MachO-universal',
b'\xce\xfa\xed\xfe': 'MachO-i386',
b'\xcf\xfa\xed\xfe': 'MachO-x86_64',
b'\xfe\xed\xfa\xce': 'MachO-ppc',
b'\xfe\xed\xfa\xcf': 'MachO-ppc64',
}
FILETYPE = {
1: 'MH_OBJECT',
2: 'MH_EXECUTE',
3: 'MH_FVMLIB',
4: 'MH_CORE',
5: 'MH_PRELOAD',
6: 'MH_DYLIB',
7: 'MH_DYLINKER',
8: 'MH_BUNDLE',
9: 'MH_DYLIB_STUB',
10: 'MH_DSYM',
11: 'MH_KEXT_BUNDLE',
}
def is_macho(path):
if path.endswith(NO_EXT) or islink(path) or not isfile(path):
return False
with open(path, 'rb') as fi:
head = fi.read(4)
return bool(head in MAGIC)
def is_dylib(path):
return human_filetype(path) == 'DYLIB'
def human_filetype(path):
lines = subprocess.check_output(['otool', '-h', path]).decode('utf-8').splitlines()
assert lines[0].startswith(path), path
for line in lines:
if line.strip().startswith('0x'):
header = line.split()
filetype = int(header[4])
return FILETYPE[filetype][3:]
def otool(path):
"thin wrapper around otool -L"
lines = subprocess.check_output(['otool', '-L', path]).decode('utf-8').splitlines()
assert lines[0].startswith(path), path
res = []
for line in lines[1:]:
assert line[0] == '\t', path
res.append(line.split()[0])
return res
def get_rpaths(path):
lines = subprocess.check_output(['otool', '-l',
path]).decode('utf-8').splitlines()
check_for_rpath = False
rpaths = []
for line in lines:
if 'cmd LC_RPATH' in line:
check_for_rpath = True
if check_for_rpath and 'path' in line:
_, rpath, _ = line.split(None, 2)
rpaths.append(rpath)
return rpaths
def install_name_change(path, cb_func):
"""
change dynamic shared library install names of Mach-O binary `path`.
    `cb_func` is a callback function invoked for each shared library name.
    It is called with `path` and the current shared library install name,
    and returns the new name (or None if the name should be unchanged).
"""
changes = []
for link in otool(path):
# The first link may be the install name of the library itself, but
# this isn't a big deal because install_name_tool -change is a no-op
# if given a dependent install name that doesn't exist.
new_link = cb_func(path, link)
if new_link:
changes.append((link, new_link))
ret = True
for old, new in changes:
args = ['install_name_tool', '-change', old, new, path]
print(' '.join(args))
p = subprocess.Popen(args, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
stderr = stderr.decode('utf-8')
if "Mach-O dynamic shared library stub file" in stderr:
print("Skipping Mach-O dynamic shared library stub file %s" % path)
ret = False
continue
else:
print(stderr, file=sys.stderr)
if p.returncode:
raise RuntimeError("install_name_tool failed with exit status %d"
% p.returncode)
return ret
if __name__ == '__main__':
if sys.platform == 'darwin':
for path in '/bin/ls', '/etc/locate.rc':
print(path, is_macho(path))
| {
"repo_name": "frol/conda-build",
"path": "conda_build/macho.py",
"copies": "2",
"size": "3576",
"license": "bsd-3-clause",
"hash": 4032862317009906000,
"line_mean": 29.5641025641,
"line_max": 87,
"alpha_frac": 0.5813758389,
"autogenerated": false,
"ratio": 3.3704052780395855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49517811169395853,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import textwrap
import warnings
import email
import time
import random
import threading
import json
import stripe
from stripe import error, util, six
from stripe.request_metrics import RequestMetrics
# - Requests is the preferred HTTP library
# - Google App Engine has urlfetch
# - Use Pycurl if it's there (at least it verifies SSL certs)
# - Fall back to urllib2 with a warning if needed
try:
from stripe.six.moves import urllib
except ImportError:
# Try to load in urllib2, but don't sweat it if it's not available.
pass
try:
import pycurl
except ImportError:
pycurl = None
try:
import requests
except ImportError:
requests = None
else:
try:
# Require version 0.8.8, but don't want to depend on distutils
version = requests.__version__
major, minor, patch = [int(i) for i in version.split(".")]
except Exception:
# Probably some new-fangled version, so it should support verify
pass
else:
if (major, minor, patch) < (0, 8, 8):
sys.stderr.write(
"Warning: the Stripe library requires that your Python "
'"requests" library be newer than version 0.8.8, but your '
'"requests" library is version %s. Stripe will fall back to '
"an alternate HTTP library so everything should work. We "
'recommend upgrading your "requests" library. If you have any '
"questions, please contact support@stripe.com. (HINT: running "
'"pip install -U requests" should upgrade your requests '
"library to the latest version.)" % (version,)
)
requests = None
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
# proxy support for the pycurl client
from stripe.six.moves.urllib.parse import urlparse
def _now_ms():
return int(round(time.time() * 1000))
def new_default_http_client(*args, **kwargs):
if urlfetch:
impl = UrlFetchClient
elif requests:
impl = RequestsClient
elif pycurl:
impl = PycurlClient
else:
impl = Urllib2Client
warnings.warn(
"Warning: the Stripe library is falling back to urllib2/urllib "
"because neither requests nor pycurl are installed. "
"urllib2's SSL implementation doesn't verify server "
"certificates. For improved security, we suggest installing "
"requests."
)
return impl(*args, **kwargs)
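# Illustrative usage (the proxy URL is a placeholder): callers can force a
# specific client or proxy globally with
#
#     stripe.default_http_client = new_default_http_client(
#         verify_ssl_certs=True, proxy="https://user:pass@proxy.example.com:8080"
#     )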
class HTTPClient(object):
MAX_DELAY = 2
INITIAL_DELAY = 0.5
MAX_RETRY_AFTER = 60
def __init__(self, verify_ssl_certs=True, proxy=None):
self._verify_ssl_certs = verify_ssl_certs
if proxy:
if isinstance(proxy, str):
proxy = {"http": proxy, "https": proxy}
if not isinstance(proxy, dict):
                raise ValueError(
                    "Proxy(ies) must be specified as either a string "
                    "URL or a dict() with string URL under the "
                    '"https" and/or "http" keys.'
                )
self._proxy = proxy.copy() if proxy else None
self._thread_local = threading.local()
def request_with_retries(self, method, url, headers, post_data=None):
return self._request_with_retries_internal(
method, url, headers, post_data, is_streaming=False
)
def request_stream_with_retries(
self, method, url, headers, post_data=None
):
return self._request_with_retries_internal(
method, url, headers, post_data, is_streaming=True
)
def _request_with_retries_internal(
self, method, url, headers, post_data, is_streaming
):
self._add_telemetry_header(headers)
num_retries = 0
while True:
request_start = _now_ms()
try:
if is_streaming:
response = self.request_stream(
method, url, headers, post_data
)
else:
response = self.request(method, url, headers, post_data)
connection_error = None
except error.APIConnectionError as e:
connection_error = e
response = None
if self._should_retry(response, connection_error, num_retries):
if connection_error:
util.log_info(
"Encountered a retryable error %s"
% connection_error.user_message
)
num_retries += 1
sleep_time = self._sleep_time_seconds(num_retries, response)
util.log_info(
(
"Initiating retry %i for request %s %s after "
"sleeping %.2f seconds."
% (num_retries, method, url, sleep_time)
)
)
time.sleep(sleep_time)
else:
if response is not None:
self._record_request_metrics(response, request_start)
return response
else:
raise connection_error
def request(self, method, url, headers, post_data=None):
raise NotImplementedError(
"HTTPClient subclasses must implement `request`"
)
def request_stream(self, method, url, headers, post_data=None):
raise NotImplementedError(
"HTTPClient subclasses must implement `request_stream`"
)
def _should_retry(self, response, api_connection_error, num_retries):
if num_retries >= self._max_network_retries():
return False
if response is None:
# We generally want to retry on timeout and connection
# exceptions, but defer this decision to underlying subclass
# implementations. They should evaluate the driver-specific
# errors worthy of retries, and set flag on the error returned.
return api_connection_error.should_retry
_, status_code, rheaders = response
        # The API may ask us not to retry (e.g. if doing so would be a no-op)
        # or advise us to retry (e.g. in cases of lock timeouts); we defer to that.
#
# Note that we expect the headers object to be a CaseInsensitiveDict, as is the case with the requests library.
if rheaders is not None and "stripe-should-retry" in rheaders:
if rheaders["stripe-should-retry"] == "false":
return False
if rheaders["stripe-should-retry"] == "true":
return True
# Retry on conflict errors.
if status_code == 409:
return True
# Retry on 500, 503, and other internal errors.
#
# Note that we expect the stripe-should-retry header to be false
# in most cases when a 500 is returned, since our idempotency framework
# would typically replay it anyway.
if status_code >= 500:
return True
return False
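    # For illustration, `response` is the (content, status_code, headers)
    # tuple produced by the request methods; roughly:
    #     (body, 409, {})                                -> retried
    #     (body, 500, {'stripe-should-retry': 'false'})  -> not retried
    #     (body, 400, {'stripe-should-retry': 'true'})   -> retried
    # subject to the num_retries limit checked first.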
def _max_network_retries(self):
from stripe import max_network_retries
# Configured retries, isolated here for tests
return max_network_retries
def _retry_after_header(self, response=None):
if response is None:
return None
_, _, rheaders = response
try:
return int(rheaders["retry-after"])
except (KeyError, ValueError):
return None
def _sleep_time_seconds(self, num_retries, response=None):
# Apply exponential backoff with initial_network_retry_delay on the
# number of num_retries so far as inputs.
# Do not allow the number to exceed max_network_retry_delay.
sleep_seconds = min(
HTTPClient.INITIAL_DELAY * (2 ** (num_retries - 1)),
HTTPClient.MAX_DELAY,
)
sleep_seconds = self._add_jitter_time(sleep_seconds)
# But never sleep less than the base sleep seconds.
sleep_seconds = max(HTTPClient.INITIAL_DELAY, sleep_seconds)
# And never sleep less than the time the API asks us to wait, assuming it's a reasonable ask.
retry_after = self._retry_after_header(response) or 0
if retry_after <= HTTPClient.MAX_RETRY_AFTER:
sleep_seconds = max(retry_after, sleep_seconds)
return sleep_seconds
def _add_jitter_time(self, sleep_seconds):
# Randomize the value in [(sleep_seconds/ 2) to (sleep_seconds)]
# Also separated method here to isolate randomness for tests
sleep_seconds *= 0.5 * (1 + random.uniform(0, 1))
return sleep_seconds
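    # Worked example of the resulting schedule with INITIAL_DELAY=0.5 and
    # MAX_DELAY=2, ignoring any Retry-After header:
    #     retry 1: base 0.5s -> jitter range [0.25, 0.5] -> floored to 0.5s
    #     retry 2: base 1.0s -> jitter range [0.5, 1.0]
    #     retry 3: base 2.0s -> jitter range [1.0, 2.0]
    #     retry 4+: base capped at 2.0s, so still [1.0, 2.0]
    # A Retry-After value of up to 60s raises the floor to that value.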
def _add_telemetry_header(self, headers):
last_request_metrics = getattr(
self._thread_local, "last_request_metrics", None
)
if stripe.enable_telemetry and last_request_metrics:
telemetry = {
"last_request_metrics": last_request_metrics.payload()
}
headers["X-Stripe-Client-Telemetry"] = json.dumps(telemetry)
def _record_request_metrics(self, response, request_start):
_, _, rheaders = response
if "Request-Id" in rheaders and stripe.enable_telemetry:
request_id = rheaders["Request-Id"]
request_duration_ms = _now_ms() - request_start
self._thread_local.last_request_metrics = RequestMetrics(
request_id, request_duration_ms
)
def close(self):
raise NotImplementedError(
"HTTPClient subclasses must implement `close`"
)
class RequestsClient(HTTPClient):
name = "requests"
def __init__(self, timeout=80, session=None, **kwargs):
super(RequestsClient, self).__init__(**kwargs)
self._session = session
self._timeout = timeout
def request(self, method, url, headers, post_data=None):
return self._request_internal(
method, url, headers, post_data, is_streaming=False
)
def request_stream(self, method, url, headers, post_data=None):
return self._request_internal(
method, url, headers, post_data, is_streaming=True
)
def _request_internal(self, method, url, headers, post_data, is_streaming):
kwargs = {}
if self._verify_ssl_certs:
kwargs["verify"] = stripe.ca_bundle_path
else:
kwargs["verify"] = False
if self._proxy:
kwargs["proxies"] = self._proxy
if is_streaming:
kwargs["stream"] = True
if getattr(self._thread_local, "session", None) is None:
self._thread_local.session = self._session or requests.Session()
try:
try:
result = self._thread_local.session.request(
method,
url,
headers=headers,
data=post_data,
timeout=self._timeout,
**kwargs
)
except TypeError as e:
raise TypeError(
"Warning: It looks like your installed version of the "
'"requests" library is not compatible with Stripe\'s '
"usage thereof. (HINT: The most likely cause is that "
'your "requests" library is out of date. You can fix '
'that by running "pip install -U requests".) The '
"underlying error was: %s" % (e,)
)
if is_streaming:
content = result.raw
else:
# This causes the content to actually be read, which could cause
# e.g. a socket timeout. TODO: The other fetch methods probably
# are susceptible to the same and should be updated.
content = result.content
status_code = result.status_code
except Exception as e:
# Would catch just requests.exceptions.RequestException, but can
# also raise ValueError, RuntimeError, etc.
self._handle_request_error(e)
return content, status_code, result.headers
def _handle_request_error(self, e):
        # Catch SSL errors first, since they are a subclass of
        # ConnectionError but we don't want to retry them.
if isinstance(e, requests.exceptions.SSLError):
msg = (
"Could not verify Stripe's SSL certificate. Please make "
"sure that your network is not intercepting certificates. "
"If this problem persists, let us know at "
"support@stripe.com."
)
err = "%s: %s" % (type(e).__name__, str(e))
should_retry = False
# Retry only timeout and connect errors; similar to urllib3 Retry
elif isinstance(
e,
(requests.exceptions.Timeout, requests.exceptions.ConnectionError),
):
msg = (
"Unexpected error communicating with Stripe. "
"If this problem persists, let us know at "
"support@stripe.com."
)
err = "%s: %s" % (type(e).__name__, str(e))
should_retry = True
# Catch remaining request exceptions
elif isinstance(e, requests.exceptions.RequestException):
msg = (
"Unexpected error communicating with Stripe. "
"If this problem persists, let us know at "
"support@stripe.com."
)
err = "%s: %s" % (type(e).__name__, str(e))
should_retry = False
else:
msg = (
"Unexpected error communicating with Stripe. "
"It looks like there's probably a configuration "
"issue locally. If this problem persists, let us "
"know at support@stripe.com."
)
err = "A %s was raised" % (type(e).__name__,)
if str(e):
err += " with error message %s" % (str(e),)
else:
err += " with no error message"
should_retry = False
msg = textwrap.fill(msg) + "\n\n(Network error: %s)" % (err,)
raise error.APIConnectionError(msg, should_retry=should_retry)
def close(self):
if getattr(self._thread_local, "session", None) is not None:
self._thread_local.session.close()
class UrlFetchClient(HTTPClient):
name = "urlfetch"
def __init__(self, verify_ssl_certs=True, proxy=None, deadline=55):
super(UrlFetchClient, self).__init__(
verify_ssl_certs=verify_ssl_certs, proxy=proxy
)
# no proxy support in urlfetch. for a patch, see:
# https://code.google.com/p/googleappengine/issues/detail?id=544
if proxy:
raise ValueError(
"No proxy support in urlfetch library. "
"Set stripe.default_http_client to either RequestsClient, "
"PycurlClient, or Urllib2Client instance to use a proxy."
)
self._verify_ssl_certs = verify_ssl_certs
# GAE requests time out after 60 seconds, so make sure to default
# to 55 seconds to allow for a slow Stripe
self._deadline = deadline
def request(self, method, url, headers, post_data=None):
return self._request_internal(
method, url, headers, post_data, is_streaming=False
)
def request_stream(self, method, url, headers, post_data=None):
return self._request_internal(
method, url, headers, post_data, is_streaming=True
)
def _request_internal(self, method, url, headers, post_data, is_streaming):
try:
result = urlfetch.fetch(
url=url,
method=method,
headers=headers,
# Google App Engine doesn't let us specify our own cert bundle.
# However, that's ok because the CA bundle they use recognizes
# api.stripe.com.
validate_certificate=self._verify_ssl_certs,
deadline=self._deadline,
payload=post_data,
)
except urlfetch.Error as e:
self._handle_request_error(e, url)
if is_streaming:
content = util.io.BytesIO(str.encode(result.content))
else:
content = result.content
return content, result.status_code, result.headers
def _handle_request_error(self, e, url):
if isinstance(e, urlfetch.InvalidURLError):
msg = (
"The Stripe library attempted to fetch an "
"invalid URL (%r). This is likely due to a bug "
"in the Stripe Python bindings. Please let us know "
"at support@stripe.com." % (url,)
)
elif isinstance(e, urlfetch.DownloadError):
msg = "There was a problem retrieving data from Stripe."
elif isinstance(e, urlfetch.ResponseTooLargeError):
msg = (
"There was a problem receiving all of your data from "
"Stripe. This is likely due to a bug in Stripe. "
"Please let us know at support@stripe.com."
)
else:
msg = (
"Unexpected error communicating with Stripe. If this "
"problem persists, let us know at support@stripe.com."
)
msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
raise error.APIConnectionError(msg)
def close(self):
pass
class PycurlClient(HTTPClient):
name = "pycurl"
def __init__(self, verify_ssl_certs=True, proxy=None):
super(PycurlClient, self).__init__(
verify_ssl_certs=verify_ssl_certs, proxy=proxy
)
# Initialize this within the object so that we can reuse connections.
self._curl = pycurl.Curl()
# need to urlparse the proxy, since PyCurl
# consumes the proxy url in small pieces
if self._proxy:
# now that we have the parser, get the proxy url pieces
proxy = self._proxy
for scheme, value in six.iteritems(proxy):
proxy[scheme] = urlparse(value)
def parse_headers(self, data):
if "\r\n" not in data:
return {}
raw_headers = data.split("\r\n", 1)[1]
headers = email.message_from_string(raw_headers)
return dict((k.lower(), v) for k, v in six.iteritems(dict(headers)))
def request(self, method, url, headers, post_data=None):
return self._request_internal(
method, url, headers, post_data, is_streaming=False
)
def request_stream(self, method, url, headers, post_data=None):
return self._request_internal(
method, url, headers, post_data, is_streaming=True
)
def _request_internal(self, method, url, headers, post_data, is_streaming):
b = util.io.BytesIO()
rheaders = util.io.BytesIO()
# Pycurl's design is a little weird: although we set per-request
# options on this object, it's also capable of maintaining established
# connections. Here we call reset() between uses to make sure it's in a
# pristine state, but notably reset() doesn't reset connections, so we
# still get to take advantage of those by virtue of re-using the same
# object.
self._curl.reset()
proxy = self._get_proxy(url)
if proxy:
if proxy.hostname:
self._curl.setopt(pycurl.PROXY, proxy.hostname)
if proxy.port:
self._curl.setopt(pycurl.PROXYPORT, proxy.port)
if proxy.username or proxy.password:
self._curl.setopt(
pycurl.PROXYUSERPWD,
"%s:%s" % (proxy.username, proxy.password),
)
if method == "get":
self._curl.setopt(pycurl.HTTPGET, 1)
elif method == "post":
self._curl.setopt(pycurl.POST, 1)
self._curl.setopt(pycurl.POSTFIELDS, post_data)
else:
self._curl.setopt(pycurl.CUSTOMREQUEST, method.upper())
# pycurl doesn't like unicode URLs
self._curl.setopt(pycurl.URL, util.utf8(url))
self._curl.setopt(pycurl.WRITEFUNCTION, b.write)
self._curl.setopt(pycurl.HEADERFUNCTION, rheaders.write)
self._curl.setopt(pycurl.NOSIGNAL, 1)
self._curl.setopt(pycurl.CONNECTTIMEOUT, 30)
self._curl.setopt(pycurl.TIMEOUT, 80)
self._curl.setopt(
pycurl.HTTPHEADER,
["%s: %s" % (k, v) for k, v in six.iteritems(dict(headers))],
)
if self._verify_ssl_certs:
self._curl.setopt(pycurl.CAINFO, stripe.ca_bundle_path)
else:
self._curl.setopt(pycurl.SSL_VERIFYHOST, False)
try:
self._curl.perform()
except pycurl.error as e:
self._handle_request_error(e)
if is_streaming:
b.seek(0)
rcontent = b
else:
rcontent = b.getvalue().decode("utf-8")
rcode = self._curl.getinfo(pycurl.RESPONSE_CODE)
headers = self.parse_headers(rheaders.getvalue().decode("utf-8"))
return rcontent, rcode, headers
def _handle_request_error(self, e):
if e.args[0] in [
pycurl.E_COULDNT_CONNECT,
pycurl.E_COULDNT_RESOLVE_HOST,
pycurl.E_OPERATION_TIMEOUTED,
]:
msg = (
"Could not connect to Stripe. Please check your "
"internet connection and try again. If this problem "
"persists, you should check Stripe's service status at "
"https://twitter.com/stripestatus, or let us know at "
"support@stripe.com."
)
should_retry = True
elif e.args[0] in [pycurl.E_SSL_CACERT, pycurl.E_SSL_PEER_CERTIFICATE]:
msg = (
"Could not verify Stripe's SSL certificate. Please make "
"sure that your network is not intercepting certificates. "
"If this problem persists, let us know at "
"support@stripe.com."
)
should_retry = False
else:
msg = (
"Unexpected error communicating with Stripe. If this "
"problem persists, let us know at support@stripe.com."
)
should_retry = False
msg = textwrap.fill(msg) + "\n\n(Network error: " + e.args[1] + ")"
raise error.APIConnectionError(msg, should_retry=should_retry)
def _get_proxy(self, url):
if self._proxy:
proxy = self._proxy
scheme = url.split(":")[0] if url else None
if scheme:
return proxy.get(scheme, proxy.get(scheme[0:-1]))
return None
def close(self):
pass
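# Minimal standalone sketch of the header-parsing approach used by
# PycurlClient.parse_headers above: drop the status line, then let the stdlib
# email parser handle the "Name: value" lines. The sample input in the
# trailing comment is made up for illustration.
def _example_parse_headers(raw):
    import email
    if "\r\n" not in raw:
        return {}
    without_status_line = raw.split("\r\n", 1)[1]
    parsed = email.message_from_string(without_status_line)
    return dict((k.lower(), v) for k, v in dict(parsed).items())
# _example_parse_headers("HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n\r\n")
# returns {"content-type": "application/json"}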
class Urllib2Client(HTTPClient):
name = "urllib.request"
def __init__(self, verify_ssl_certs=True, proxy=None):
super(Urllib2Client, self).__init__(
verify_ssl_certs=verify_ssl_certs, proxy=proxy
)
# prepare and cache proxy tied opener here
self._opener = None
if self._proxy:
proxy = urllib.request.ProxyHandler(self._proxy)
self._opener = urllib.request.build_opener(proxy)
def request(self, method, url, headers, post_data=None):
return self._request_internal(
method, url, headers, post_data, is_streaming=False
)
def request_stream(self, method, url, headers, post_data=None):
return self._request_internal(
method, url, headers, post_data, is_streaming=True
)
def _request_internal(self, method, url, headers, post_data, is_streaming):
if six.PY3 and isinstance(post_data, six.string_types):
post_data = post_data.encode("utf-8")
req = urllib.request.Request(url, post_data, headers)
if method not in ("get", "post"):
req.get_method = lambda: method.upper()
try:
            # Use the custom proxy-tied opener, if any;
            # otherwise, fall back to the default urllib opener.
response = (
self._opener.open(req)
if self._opener
else urllib.request.urlopen(req)
)
if is_streaming:
rcontent = response
else:
rcontent = response.read()
rcode = response.code
headers = dict(response.info())
except urllib.error.HTTPError as e:
rcode = e.code
rcontent = e.read()
headers = dict(e.info())
except (urllib.error.URLError, ValueError) as e:
self._handle_request_error(e)
lh = dict((k.lower(), v) for k, v in six.iteritems(dict(headers)))
return rcontent, rcode, lh
def _handle_request_error(self, e):
msg = (
"Unexpected error communicating with Stripe. "
"If this problem persists, let us know at support@stripe.com."
)
msg = textwrap.fill(msg) + "\n\n(Network error: " + str(e) + ")"
raise error.APIConnectionError(msg)
def close(self):
pass
| {
"repo_name": "stripe/stripe-python",
"path": "stripe/http_client.py",
"copies": "1",
"size": "25692",
"license": "mit",
"hash": 7135038370865611000,
"line_mean": 35.3394625177,
"line_max": 119,
"alpha_frac": 0.5651175463,
"autogenerated": false,
"ratio": 4.302076356329538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5367193902629538,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import threading
import time
from timeit import default_timer
from ..callbacks import Callback
from ..utils import ignoring
def format_time(t):
"""Format seconds into a human readable form.
>>> format_time(10.4)
'10.4s'
>>> format_time(1000.4)
'16min 40.4s'
"""
m, s = divmod(t, 60)
h, m = divmod(m, 60)
if h:
return '{0:2.0f}hr {1:2.0f}min {2:4.1f}s'.format(h, m, s)
elif m:
return '{0:2.0f}min {1:4.1f}s'.format(m, s)
else:
return '{0:4.1f}s'.format(s)
class ProgressBar(Callback):
"""A progress bar for dask.
Parameters
----------
minimum : int, optional
Minimum time threshold in seconds before displaying a progress bar.
Default is 0 (always display)
width : int, optional
Width of the bar
dt : float, optional
Update resolution in seconds, default is 0.1 seconds
Examples
--------
Below we create a progress bar with a minimum threshold of 1 second before
displaying. For cheap computations nothing is shown:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_fast_computation.compute()
But for expensive computations a full progress bar is displayed:
>>> with ProgressBar(minimum=1.0): # doctest: +SKIP
... out = some_slow_computation.compute()
[########################################] | 100% Completed | 10.4 s
The duration of the last computation is available as an attribute
>>> pbar = ProgressBar()
>>> with pbar: # doctest: +SKIP
... out = some_computation.compute()
[########################################] | 100% Completed | 10.4 s
>>> pbar.last_duration # doctest: +SKIP
10.4
You can also register a progress bar so that it displays for all
computations:
>>> pbar = ProgressBar() # doctest: +SKIP
>>> pbar.register() # doctest: +SKIP
>>> some_slow_computation.compute() # doctest: +SKIP
[########################################] | 100% Completed | 10.4 s
"""
def __init__(self, minimum=0, width=40, dt=0.1):
self._minimum = minimum
self._width = width
self._dt = dt
self.last_duration = 0
def _start(self, dsk):
self._state = None
self._start_time = default_timer()
# Start background thread
self._running = True
self._timer = threading.Thread(target=self._timer_func)
self._timer.start()
def _pretask(self, key, dsk, state):
self._state = state
sys.stdout.flush()
def _finish(self, dsk, state, errored):
self._running = False
self._timer.join()
elapsed = default_timer() - self._start_time
self.last_duration = elapsed
if elapsed < self._minimum:
return
if not errored:
self._draw_bar(1, elapsed)
else:
self._update_bar(elapsed)
sys.stdout.write('\n')
sys.stdout.flush()
def _timer_func(self):
"""Background thread for updating the progress bar"""
while self._running:
elapsed = default_timer() - self._start_time
if elapsed > self._minimum:
self._update_bar(elapsed)
time.sleep(self._dt)
def _update_bar(self, elapsed):
s = self._state
if not s:
self._draw_bar(0, elapsed)
return
ndone = len(s['finished'])
ntasks = sum(len(s[k]) for k in ['ready', 'waiting', 'running']) + ndone
self._draw_bar(ndone / ntasks if ntasks else 0, elapsed)
def _draw_bar(self, frac, elapsed):
bar = '#' * int(self._width * frac)
percent = int(100 * frac)
elapsed = format_time(elapsed)
msg = '\r[{0:<{1}}] | {2}% Completed | {3}'.format(bar, self._width,
percent, elapsed)
with ignoring(ValueError):
sys.stdout.write(msg)
sys.stdout.flush()
| {
"repo_name": "mikegraham/dask",
"path": "dask/diagnostics/progress.py",
"copies": "3",
"size": "4169",
"license": "bsd-3-clause",
"hash": -5650958207705143000,
"line_mean": 30.3458646617,
"line_max": 80,
"alpha_frac": 0.5423362917,
"autogenerated": false,
"ratio": 3.9108818011257034,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5953218092825703,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import traceback
from contextlib import contextmanager
from functools import wraps
__all__ = ['set_cursor', 'set_cursor_cm', 'messagebox_on_error',
'die_on_error']
def set_cursor(shape):
"""Set the Qt cursor for the duration of a function call, and unset
:param shape: Cursor shape to set.
"""
def wrapper(func):
@wraps(func)
def result(*args, **kwargs):
from glue.utils.qt import get_qapp # Here to avoid circ import
app = get_qapp()
app.setOverrideCursor(shape)
try:
return func(*args, **kwargs)
finally:
app.restoreOverrideCursor()
return result
return wrapper
# TODO: Does this really belong in this module?
@contextmanager
def set_cursor_cm(shape):
"""Context manager equivalent for :func:`set_cursor`."""
from glue.utils.qt import get_qapp
app = get_qapp()
app.setOverrideCursor(shape)
try:
yield
finally:
app.restoreOverrideCursor()
def messagebox_on_error(msg):
"""Decorator that catches exceptions and displays an error message"""
from glue.utils.qt import QMessageBoxPatched as QMessageBox # Must be here
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
m = "%s\n%s" % (msg, e.args[0])
detail = str(traceback.format_exc())
qmb = QMessageBox(QMessageBox.Critical, "Error", m)
qmb.setDetailedText(detail)
qmb.resize(400, qmb.size().height())
qmb.exec_()
return wrapper
return decorator
def die_on_error(msg):
"""Decorator that catches errors, displays a popup message, and quits"""
from glue.utils.qt import QMessageBoxPatched as QMessageBox
def decorator(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
# Make sure application has been started
from glue.utils.qt import get_qapp # Here to avoid circ import
get_qapp()
m = "%s\n%s" % (msg, e)
detail = str(traceback.format_exc())
if len(m) > 500:
detail = "Full message:\n\n%s\n\n%s" % (m, detail)
m = m[:500] + '...'
qmb = QMessageBox(QMessageBox.Critical, "Error", m)
qmb.setDetailedText(detail)
qmb.show()
qmb.raise_()
qmb.exec_()
sys.exit(1)
return wrapper
return decorator
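# Hedged usage sketch (hypothetical, not part of glue): how the decorators
# above are meant to be applied. The demo is guarded so nothing runs at import
# time; actually calling the decorated function requires glue's Qt utilities
# and a running Qt application.
if __name__ == "__main__":
    @messagebox_on_error("Could not load the file")
    def load(path):
        raise IOError("no such file: %s" % path)
    # load("missing.fits") would show a critical QMessageBox containing the
    # message above plus the exception text, instead of propagating the error.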
| {
"repo_name": "saimn/glue",
"path": "glue/utils/qt/decorators.py",
"copies": "1",
"size": "2817",
"license": "bsd-3-clause",
"hash": 8421681209490977000,
"line_mean": 29.6195652174,
"line_max": 79,
"alpha_frac": 0.557685481,
"autogenerated": false,
"ratio": 4.154867256637168,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 92
} |
from __future__ import absolute_import, division, print_function
import sys
import uuid
import numpy as np
from matplotlib.colors import ColorConverter
from glue.core.data import Subset
from glue.config import settings
from glue.core.exceptions import IncompatibleAttribute
from .volume_visual import MultiVolume
from .colors import get_translucent_cmap
from .layer_state import VolumeLayerState
from ..common.layer_artist import VispyLayerArtist
class VolumeLayerArtist(VispyLayerArtist):
"""
A layer artist to render volumes.
This is more complex than for other visual types, because for volumes, we
need to manage all the volumes via a single MultiVolume visual class for
each data viewer.
"""
def __init__(self, vispy_viewer=None, layer=None, layer_state=None):
super(VolumeLayerArtist, self).__init__(layer)
self._clip_limits = None
self.layer = layer or layer_state.layer
self.vispy_viewer = vispy_viewer
self.vispy_widget = vispy_viewer._vispy_widget
# TODO: need to remove layers when layer artist is removed
self._viewer_state = vispy_viewer.state
self.state = layer_state or VolumeLayerState(layer=self.layer)
if self.state not in self._viewer_state.layers:
self._viewer_state.layers.append(self.state)
# We create a unique ID for this layer artist, that will be used to
# refer to the layer artist in the MultiVolume. We have to do this
# rather than use self.id because we can't guarantee the latter is
# unique.
self.id = str(uuid.uuid4())
# We need to use MultiVolume instance to store volumes, but we should
# only have one per canvas. Therefore, we store the MultiVolume
# instance in the vispy viewer instance.
if not hasattr(self.vispy_widget, '_multivol'):
# Set whether we are emulating a 3D texture. This needs to be
# enabled as a workaround on Windows otherwise VisPy crashes.
emulate_texture = (sys.platform == 'win32' and
sys.version_info[0] < 3)
multivol = MultiVolume(threshold=0.1, emulate_texture=emulate_texture,
bgcolor=settings.BACKGROUND_COLOR)
self.vispy_widget.add_data_visual(multivol)
self.vispy_widget._multivol = multivol
self._multivol = self.vispy_widget._multivol
self._multivol.allocate(self.id)
try:
self.state.add_callback('*', self._update_from_state, as_kwargs=True)
except TypeError: # glue-core >= 0.11
self.state.add_global_callback(self._update_from_state)
self._update_from_state(**self.state.as_dict())
self.visible = True
@property
def visual(self):
return self._multivol
@property
def bbox(self):
return (-0.5, self.layer.shape[2] - 0.5,
-0.5, self.layer.shape[1] - 0.5,
-0.5, self.layer.shape[0] - 0.5)
@property
def shape(self):
return self.layer.shape
def redraw(self):
"""
Redraw the Vispy canvas
"""
self.vispy_widget.canvas.update()
def clear(self):
"""
Remove the layer artist from the visualization
"""
# We don't want to deallocate here because this can be called if we
# disable the layer due to incompatible attributes
self._multivol.set_data(self.id, np.zeros(self._multivol._data_shape))
def update(self):
"""
Update the visualization to reflect the underlying data
"""
self.redraw()
self._changed = False
def _update_from_state(self, **props):
if 'color' in props:
self._update_cmap_from_color()
if 'vmin' in props or 'vmax' in props:
self._update_limits()
if 'alpha' in props:
self._update_alpha()
if 'attribute' in props or 'subset_mode' in props:
self._update_data()
def _update_cmap_from_color(self):
cmap = get_translucent_cmap(*ColorConverter().to_rgb(self.state.color))
self._multivol.set_cmap(self.id, cmap)
self.redraw()
def _update_limits(self):
self._multivol.set_clim(self.id, (self.state.vmin, self.state.vmax))
self.redraw()
def _update_alpha(self):
self._multivol.set_weight(self.id, self.state.alpha)
self.redraw()
def _update_data(self):
if self.state.attribute is None:
return
if isinstance(self.layer, Subset):
try:
mask = self.layer.to_mask()
except IncompatibleAttribute:
# The following includes a call to self.clear()
self.disable("Subset cannot be applied to this data")
return
else:
self._enabled = True
if self.state.subset_mode == 'outline':
data = mask.astype(float)
else:
data = self.layer.data[self.state.attribute] * mask
else:
data = self.layer[self.state.attribute]
if self._clip_limits is not None:
xmin, xmax, ymin, ymax, zmin, zmax = self._clip_limits
imin, imax = int(np.ceil(xmin)), int(np.ceil(xmax))
jmin, jmax = int(np.ceil(ymin)), int(np.ceil(ymax))
kmin, kmax = int(np.ceil(zmin)), int(np.ceil(zmax))
invalid = -np.inf
data = data.copy()
data[:, :, :imin] = invalid
data[:, :, imax:] = invalid
data[:, :jmin] = invalid
data[:, jmax:] = invalid
data[:kmin] = invalid
data[kmax:] = invalid
self._multivol.set_data(self.id, data)
self.redraw()
def _update_visibility(self):
if self.visible:
self._multivol.enable(self.id)
else:
self._multivol.disable(self.id)
self.redraw()
def set_clip(self, limits):
self._clip_limits = limits
self._update_data()
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/volume/layer_artist.py",
"copies": "1",
"size": "6125",
"license": "bsd-2-clause",
"hash": 6402400258915072000,
"line_mean": 31.4074074074,
"line_max": 82,
"alpha_frac": 0.5947755102,
"autogenerated": false,
"ratio": 3.926282051282051,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5021057561482051,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import warnings
from contextlib import contextmanager
import pytest
from _pytest import compat
def _setoption(wmod, arg):
"""
    Copy of the warnings._setoption function, but it does not escape arguments.
"""
parts = arg.split(":")
if len(parts) > 5:
raise wmod._OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append("")
action, message, category, module, lineno = [s.strip() for s in parts]
action = wmod._getaction(action)
category = wmod._getcategory(category)
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise wmod._OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
wmod.filterwarnings(action, message, category, module, lineno)
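# Illustrative sketch (not used by the plugin): a "filterwarnings" line such as
# "ignore::DeprecationWarning:mymodule" splits into the same five fields as
# Python's own -W option; missing trailing fields default to "". The module
# name here is a made-up example.
def _example_split_filter(arg):
    parts = arg.split(":")
    while len(parts) < 5:
        parts.append("")
    return tuple(s.strip() for s in parts)
# _example_split_filter("ignore::DeprecationWarning:mymodule")
# returns ("ignore", "", "DeprecationWarning", "mymodule", "")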
def pytest_addoption(parser):
group = parser.getgroup("pytest-warnings")
group.addoption(
"-W",
"--pythonwarnings",
action="append",
help="set which warnings to report, see -W option of python itself.",
)
parser.addini(
"filterwarnings",
type="linelist",
help="Each line specifies a pattern for "
"warnings.filterwarnings. "
"Processed after -W and --pythonwarnings.",
)
def pytest_configure(config):
config.addinivalue_line(
"markers",
"filterwarnings(warning): add a warning filter to the given test. "
"see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings ",
)
@contextmanager
def catch_warnings_for_item(config, ihook, when, item):
"""
Context manager that catches warnings generated in the contained execution block.
``item`` can be None if we are not in the context of an item execution.
Each warning captured triggers the ``pytest_warning_captured`` hook.
"""
args = config.getoption("pythonwarnings") or []
inifilters = config.getini("filterwarnings")
with warnings.catch_warnings(record=True) as log:
filters_configured = args or inifilters or sys.warnoptions
for arg in args:
warnings._setoption(arg)
for arg in inifilters:
_setoption(warnings, arg)
if item is not None:
for mark in item.iter_markers(name="filterwarnings"):
for arg in mark.args:
_setoption(warnings, arg)
filters_configured = True
if not filters_configured:
# if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908)
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.filterwarnings("always", category=PendingDeprecationWarning)
yield
for warning_message in log:
ihook.pytest_warning_captured.call_historic(
kwargs=dict(warning_message=warning_message, when=when, item=item)
)
def warning_record_to_str(warning_message):
"""Convert a warnings.WarningMessage to a string, taking in account a lot of unicode shenaningans in Python 2.
When Python 2 support is dropped this function can be greatly simplified.
"""
warn_msg = warning_message.message
unicode_warning = False
if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args):
new_args = []
for m in warn_msg.args:
new_args.append(
compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m
)
unicode_warning = list(warn_msg.args) != new_args
warn_msg.args = new_args
msg = warnings.formatwarning(
warn_msg,
warning_message.category,
warning_message.filename,
warning_message.lineno,
warning_message.line,
)
if unicode_warning:
warnings.warn(
"Warning is using unicode non convertible to ascii, "
"converting to a safe representation:\n %s" % msg,
UnicodeWarning,
)
return msg
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(item):
with catch_warnings_for_item(
config=item.config, ihook=item.ihook, when="runtest", item=item
):
yield
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection(session):
config = session.config
with catch_warnings_for_item(
config=config, ihook=config.hook, when="collect", item=None
):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(terminalreporter):
config = terminalreporter.config
with catch_warnings_for_item(
config=config, ihook=config.hook, when="config", item=None
):
yield
def _issue_config_warning(warning, config):
"""
This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage:
at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured
    hook so we can display these warnings in the terminal. This is a hack until we can sort out #2891.
:param warning: the warning instance.
:param config:
"""
with warnings.catch_warnings(record=True) as records:
warnings.simplefilter("always", type(warning))
warnings.warn(warning, stacklevel=2)
config.hook.pytest_warning_captured.call_historic(
kwargs=dict(warning_message=records[0], when="config", item=None)
)
| {
"repo_name": "davidszotten/pytest",
"path": "src/_pytest/warnings.py",
"copies": "1",
"size": "5628",
"license": "mit",
"hash": 4682449723945757000,
"line_mean": 31.7209302326,
"line_max": 116,
"alpha_frac": 0.6478322672,
"autogenerated": false,
"ratio": 4.15350553505535,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.530133780225535,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
__all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode',
'iteritems']
PY3 = True if sys.version_info[0] >= 3 else False
if sys.version_info[0] < 3:
def next(obj):
return obj.next()
def iteritems(d, **kw):
return d.iteritems(**kw)
b = bytes = str
basestring_ = basestring
else:
def b(s):
if isinstance(s, str):
return s.encode('latin1')
return bytes(s)
def iteritems(d, **kw):
return iter(d.items(**kw))
next = next
basestring_ = (bytes, str)
bytes = bytes
text = str
def is_unicode(obj):
if sys.version_info[0] < 3:
return isinstance(obj, unicode)
else:
return isinstance(obj, str)
def coerce_text(v):
if not isinstance(v, basestring_):
if sys.version_info[0] < 3:
attr = '__unicode__'
else:
attr = '__str__'
        if hasattr(v, attr):
            # ``unicode`` only exists on Python 2; use ``str`` on Python 3.
            return unicode(v) if sys.version_info[0] < 3 else str(v)
else:
return bytes(v)
return v
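# Hedged sketch of how the helpers above behave; guarded so that importing this
# compatibility module stays side-effect free. The sample values are
# illustrative only.
if __name__ == "__main__":
    print(b("abc"))                             # b'abc' on Python 3, 'abc' on Python 2
    print(is_unicode(u"abc"))                   # True under both major versions
    print(sorted(iteritems({"x": 1, "y": 2})))  # [('x', 1), ('y', 2)]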
| {
"repo_name": "MSeifert04/numpy",
"path": "tools/npy_tempita/compat3.py",
"copies": "3",
"size": "1094",
"license": "bsd-3-clause",
"hash": 6284890604499872000,
"line_mean": 18.8909090909,
"line_max": 68,
"alpha_frac": 0.5383912249,
"autogenerated": false,
"ratio": 3.529032258064516,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5567423482964516,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
from blaze.expr.arithmetic import (scalar_coerce, Mult, Add, dshape)
from blaze.expr.math import sin, cos, isnan, exp, log
from blaze.expr import Symbol, eval_str, exprify
from blaze.compatibility import xfail, basestring, raises
from datetime import date, datetime
x = Symbol('x', 'real')
y = Symbol('y', 'real')
def test_basic():
expr = (x + y) * 3
assert eval(str(expr)).isidentical(expr)
assert expr.isidentical(Mult(Add(Symbol('x', 'real'), Symbol('y', 'real')), 3))
def test_eval_str():
expr = (x + y) * 3
assert eval_str(expr) == '(x + y) * 3'
assert eval_str(1) == '1'
assert eval_str('Alice') == "'Alice'"
assert "'Alice'" in eval_str(u'Alice')
print(eval_str(-x))
assert eval_str(-x) == '-x'
assert '~' in eval_str(~x)
def test_str():
x = Symbol('x', 'real')
assert str(x + 10) == 'x + 10'
def test_invert():
x = Symbol('x', 'bool')
expr = ~x
assert expr.op(x).isidentical(expr)
def test_boolean_math_has_boolean_methods():
x = Symbol('x', '?int')
expr = ~(isnan(x)) | (x > 0)
assert eval(str(expr)).isidentical(expr)
def ishashable(x):
try:
hash(x)
return True
except:
return False
def test_Symbol_is_hashable():
assert ishashable(x)
def test_relationals():
x = Symbol('x', 'real')
for expr in [x < 1, x > 1, x == 1, x != 1, x <= 1, x >= 1, ~x]:
assert expr.dshape == dshape('bool')
assert eval(str(expr)) == expr
def test_numbers():
x = Symbol('x', 'real')
y = Symbol('x', 'int')
for expr in [x + 1, x - 1, x * 1, x + y, x - y, x / y, x * y + x + y,
x**y, x**2, 2**x, x % 5, -x,
sin(x), cos(x ** 2), exp(log(y))]:
assert expr.dshape == dshape('real')
assert eval(str(expr)) == expr
assert (-y).dshape == dshape('int')
@xfail(reason="TODO")
def test_neg_dshape_unsigned():
y = Symbol('x', 'uint32')
assert (-y).dshape == dshape('int32')
@xfail(reason="TODO")
def test_arithmetic_dshape_inference():
x = Symbol('x', 'int')
y = Symbol('y', 'int')
assert (x + y).dshape == dshape('int')
def test_date_coercion():
d = Symbol('d', 'date')
expr = d < '2012-01-01'
assert isinstance(expr.rhs, date)
def test_datetime_coercion():
d = Symbol('d', 'datetime')
expr = d > '2012-01-01T12:30:00'
assert isinstance(expr.rhs, datetime)
class TestExprify(object):
dtypes = {'x': 'int', 'y': 'real', 'z': 'int32'}
x = Symbol('x', 'int')
y = Symbol('y', 'real')
z = Symbol('z', 'int32')
name = Symbol('name', 'string')
def test_basic_arithmetic(self):
assert exprify('x + y', self.dtypes).isidentical(self.x + self.y)
other = isnan(sin(x) + y)
assert exprify('isnan(sin(x) + y)', self.dtypes).isidentical(other)
# parsed as a Num in Python 2 and a UnaryOp in Python 3
assert exprify('-1', {}) == -1
# parsed as UnaryOp(op=USub(), operand=1)
assert exprify('-x', self.dtypes).isidentical(-self.x)
assert exprify('-x + y', self.dtypes).isidentical(-self.x + self.y)
other = self.x * self.y + self.z
assert exprify('x * y + z', self.dtypes).isidentical(other)
assert exprify('x ** y', self.dtypes).isidentical(self.x ** self.y)
other = self.x / self.y / self.z + 1
assert exprify('x / y / z + 1', self.dtypes).isidentical(other)
other = self.x / self.y % self.z + 2 ** self.y
assert exprify('x / y % z + 2 ** y', self.dtypes).isidentical(other)
assert exprify('x // y', self.dtypes).isidentical(self.x // self.y)
assert exprify('1 // y // x', self.dtypes).isidentical(
1 // self.y // self.x)
def test_comparison(self):
other = (self.x == 1) | (self.x == 2)
assert exprify('(x == 1) | (x == 2)', self.dtypes).isidentical(other)
def test_simple_boolean_not(self):
other = ~self.x
assert exprify('~x', {'x': 'bool'}).isidentical(other)
def test_literal_string_compare(self):
other = self.name == "Alice"
result = exprify('name == "Alice"', {'name': 'string'})
assert isinstance(result.rhs, basestring)
assert result.isidentical(other)
def test_literal_int_compare(self):
other = self.x == 1
result = exprify('x == 1', self.dtypes)
assert isinstance(result.rhs, int)
assert result.isidentical(other)
def test_literal_float_compare(self):
other = self.y == 1.0
result = exprify('y == 1.0', self.dtypes)
assert isinstance(result.rhs, float)
assert result.isidentical(other)
def test_failing_exprify(self):
dtypes = self.dtypes
with raises(AssertionError):
exprify('x < y < z', dtypes)
with raises(NotImplementedError):
exprify('os.listdir()', {})
with raises(NotImplementedError):
exprify('os.listdir()', {'os': 'int', 'os.listdir': 'real'})
with raises(ValueError):
exprify('__x + __y', {'__x': 'int', '__y': 'real'})
with raises(NotImplementedError):
exprify('y if x else y', dtypes)
def test_functiondef_fail(self):
dtypes = self.dtypes
with raises(NotImplementedError):
exprify('lambda x, y: x + y', dtypes)
with raises(SyntaxError):
exprify('def f(x): return x', dtypes={'x': 'int'})
def test_comprehensions_fail(self):
dtypes = self.dtypes
if sys.version_info < (2, 7):
# dict and set comprehensions are not implemented in Python < 2.7
error = SyntaxError
else:
# and we don't allow them in versions that do
error = NotImplementedError
with raises(error):
exprify('{x: y for z in y}', dtypes)
with raises(error):
exprify('{x for z in y}', dtypes)
with raises(NotImplementedError):
exprify('[x for z in y]', dtypes)
with raises(NotImplementedError):
exprify('(x for y in z)', dtypes)
def test_boolop_fails(self):
dtypes = self.dtypes
with raises(NotImplementedError):
exprify('x or y', dtypes)
with raises(NotImplementedError):
exprify('x and y', dtypes)
with raises(NotImplementedError):
exprify('not x', dtypes)
def test_scope(self):
dtypes = {'sin': 'int'}
with raises(ValueError):
exprify('sin + 1', dtypes)
with raises(TypeError):
sin + 1
def test_scalar_coerce():
assert scalar_coerce('int', 1) == 1
assert scalar_coerce('int', '1') == 1
assert scalar_coerce('{x: int}', '1') == 1
with raises(TypeError):
scalar_coerce('{x: int, y: int}', '1')
with raises(TypeError):
scalar_coerce('3 * int', '1')
assert scalar_coerce('date', 'Jan 1st, 2012') == date(2012, 1, 1)
assert (scalar_coerce('datetime', 'Jan 1st, 2012 12:00:00') ==
datetime(2012, 1, 1, 12, 0, 0))
with raises(ValueError):
scalar_coerce('date', 'Jan 1st, 2012 12:00:00')
assert scalar_coerce('?date', 'Jan 1st, 2012') == date(2012, 1, 1)
assert scalar_coerce('?date', '2012-12-01') == date(2012, 12, 1)
assert scalar_coerce('?date', '') == None
assert scalar_coerce('?int', 0) == 0
assert scalar_coerce('?int', '0') == 0
def test_scalar_name_dtype():
x = Symbol('x', 'int64')
assert x._name == 'x'
assert x.dshape == dshape('int64')
def test_scalar_field():
x = Symbol('x', '{name: string, amount: int64, when: datetime}')
assert 'amount' in dir(x)
assert x.amount.dshape == dshape('int64')
def test_scalar_projection():
x = Symbol('x', '{name: string, amount: int64, when: datetime}')
assert x[['amount', 'when']].dshape == \
dshape('{amount: int64, when: datetime}')
| {
"repo_name": "vitan/blaze",
"path": "blaze/expr/tests/test_scalar.py",
"copies": "1",
"size": "8025",
"license": "bsd-3-clause",
"hash": 6480526744474071000,
"line_mean": 27.8669064748,
"line_max": 83,
"alpha_frac": 0.5621183801,
"autogenerated": false,
"ratio": 3.2875870544858663,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43497054345858666,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
from functools import partial
from . import converters, exceptions, filters, setters, validators
from ._cmp import cmp_using
from ._config import get_run_validators, set_run_validators
from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
from ._make import (
NOTHING,
Attribute,
Factory,
attrib,
attrs,
fields,
fields_dict,
make_class,
validate,
)
from ._version_info import VersionInfo
__version__ = "21.3.0.dev0"
__version_info__ = VersionInfo._from_version_string(__version__)
__title__ = "attrs"
__description__ = "Classes Without Boilerplate"
__url__ = "https://www.attrs.org/"
__uri__ = __url__
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
s = attributes = attrs
ib = attr = attrib
dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
__all__ = [
"Attribute",
"Factory",
"NOTHING",
"asdict",
"assoc",
"astuple",
"attr",
"attrib",
"attributes",
"attrs",
"cmp_using",
"converters",
"evolve",
"exceptions",
"fields",
"fields_dict",
"filters",
"get_run_validators",
"has",
"ib",
"make_class",
"resolve_types",
"s",
"set_run_validators",
"setters",
"validate",
"validators",
]
if sys.version_info[:2] >= (3, 6):
from ._next_gen import define, field, frozen, mutable
    # __all__ must contain names (strings), not the objects themselves.
    __all__.extend(("define", "field", "frozen", "mutable"))
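# Hedged usage sketch (not part of the package): the aliases defined above mean
# ``attrs``/``attrib`` and the short ``s``/``ib`` spellings are the same
# machinery. The class below is made up for illustration and only runs when the
# module is executed directly.
if __name__ == "__main__":
    @attrs
    class Point(object):
        x = attrib(default=0)
        y = attrib(default=0)
    print(Point(1, 2))  # -> Point(x=1, y=2)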
| {
"repo_name": "python-attrs/attrs",
"path": "src/attr/__init__.py",
"copies": "2",
"size": "1618",
"license": "mit",
"hash": -1905369518122155500,
"line_mean": 19.7435897436,
"line_max": 70,
"alpha_frac": 0.6063040791,
"autogenerated": false,
"ratio": 3.2295409181636727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48358449972636725,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
from functools import partial
from . import converters, exceptions, filters, setters, validators
from ._config import get_run_validators, set_run_validators
from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
from ._make import (
NOTHING,
Attribute,
Factory,
attrib,
attrs,
fields,
fields_dict,
make_class,
validate,
)
from ._version_info import VersionInfo
__version__ = "20.3.0"
__version_info__ = VersionInfo._from_version_string(__version__)
__title__ = "attrs"
__description__ = "Classes Without Boilerplate"
__url__ = "https://www.attrs.org/"
__uri__ = __url__
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"
__license__ = "MIT"
__copyright__ = "Copyright (c) 2015 Hynek Schlawack"
s = attributes = attrs
ib = attr = attrib
dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
__all__ = [
"Attribute",
"Factory",
"NOTHING",
"asdict",
"assoc",
"astuple",
"attr",
"attrib",
"attributes",
"attrs",
"converters",
"evolve",
"exceptions",
"fields",
"fields_dict",
"filters",
"get_run_validators",
"has",
"ib",
"make_class",
"resolve_types",
"s",
"set_run_validators",
"setters",
"validate",
"validators",
]
if sys.version_info[:2] >= (3, 6):
from ._next_gen import define, field, frozen, mutable
    # __all__ must contain names (strings), not the objects themselves.
    __all__.extend(("define", "field", "frozen", "mutable"))
| {
"repo_name": "pantsbuild/pex",
"path": "pex/vendor/_vendored/attrs/attr/__init__.py",
"copies": "6",
"size": "1568",
"license": "apache-2.0",
"hash": 3841607878516782600,
"line_mean": 19.6315789474,
"line_max": 70,
"alpha_frac": 0.6045918367,
"autogenerated": false,
"ratio": 3.239669421487603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 76
} |
from __future__ import absolute_import, division, print_function
import sys
# importing a pure python package
import requests
"""
Module holding calls to a pure python package retrieved by pip
We use requests as an example of any python package you might want to interface and use in your ROS packages/nodes.
To be able to get ros1_pip_pytemplate packaged on ROS (or even get the built "install space" working),
first one would need to release (or at least build with catkin) a ROS package for requests.
Using catkin_pip for this is probably the easiest solution.
"""
import logging
_logger = logging.getLogger(__name__)
_logger.addHandler(logging.NullHandler())
class Httpbin(object):
"""Unit of testable functionality as a class, calling functionality from other libraries"""
def __init__(self, base_url=None):
self.basename = base_url or 'http://httpbin.org'
# for http connection pooling, see : http://docs.python-requests.org/en/master/user/advanced/#session-objects
self.session = requests.session()
_logger.debug("Httpbin proxy was initialized!")
def get(self, headers=None, params=None):
_logger.info("Sending GET request to {0}...".format(self.basename + '/get'))
return self.session.get(self.basename + '/get', headers=headers or {}, params=params or {})
# Note a more complete version of this is available there :
# https://github.com/asmodehn/pyros-schemas-examples/tree/master/pyros_httpbin
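# Hedged usage sketch: exercising the proxy class above. Guarded because it
# performs a real HTTP request against http://httpbin.org and therefore needs
# network access; the custom header is only an example value.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    proxy = Httpbin()
    response = proxy.get(headers={"X-Example": "ros1_pip_pytemplate"})
    print(response.status_code)
    print(response.json().get("headers"))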
| {
"repo_name": "pyros-dev/ros1_template",
"path": "ros1_pip_pytemplate/ros1_pip_pytemplate/lib_module.py",
"copies": "1",
"size": "1481",
"license": "mit",
"hash": -5082680627964435000,
"line_mean": 41.3142857143,
"line_max": 117,
"alpha_frac": 0.7245104659,
"autogenerated": false,
"ratio": 3.9076517150395778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5132162180939578,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import numpy as np
from ..extern.vispy import scene
from .axes import AxesVisual3D
from ..utils import NestedSTTransform
from matplotlib.colors import ColorConverter
from glue.config import settings
rgb = ColorConverter().to_rgb
LIMITS_PROPS = [coord + attribute for coord in 'xyz' for attribute in ['_min', '_max', '_stretch']]
class VispyWidgetHelper(object):
def __init__(self, parent=None, viewer_state=None):
# Prepare Vispy canvas. We set the depth_size to 24 to avoid issues
# with isosurfaces on MacOS X
self.canvas = scene.SceneCanvas(keys=None, show=False,
config={'depth_size': 24},
bgcolor=rgb(settings.BACKGROUND_COLOR))
# Set up a viewbox
self.view = self.canvas.central_widget.add_view()
self.view.parent = self.canvas.scene
# Set whether we are emulating a 3D texture. This needs to be enabled
# as a workaround on Windows otherwise VisPy crashes.
self.emulate_texture = (sys.platform == 'win32' and
sys.version_info[0] < 3)
self.scene_transform = scene.STTransform()
self.limit_transforms = {}
fc = rgb(settings.FOREGROUND_COLOR)
self.axis = AxesVisual3D(axis_color=fc, tick_color=fc, text_color=fc,
tick_width=1, minor_tick_length=2,
major_tick_length=4, axis_width=0,
tick_label_margin=10, axis_label_margin=25,
tick_font_size=6, axis_font_size=8,
view=self.view,
transform=self.scene_transform)
        # Create a turntable camera. For now, this is the only camera type
        # we support, but if we support more in the future, we should
        # implement that here.
# Orthographic perspective view as default
self.view.camera = scene.cameras.TurntableCamera(parent=self.view.scene,
fov=0., distance=4.0)
# We need to call render here otherwise we'll later encounter an OpenGL
# program validation error.
# self.canvas.render()
self.viewer_state = viewer_state
try:
self.viewer_state.add_callback('*', self._update_from_state, as_kwargs=True)
except TypeError: # glue-core >= 0.11
self.viewer_state.add_global_callback(self._update_from_state)
self._update_from_state(force=True)
def _update_appearance_from_settings(self):
self.canvas.bgcolor = rgb(settings.BACKGROUND_COLOR)
self.axis.axis_color = rgb(settings.FOREGROUND_COLOR)
self.axis.tick_color = rgb(settings.FOREGROUND_COLOR)
self.axis.label_color = rgb(settings.FOREGROUND_COLOR)
def add_data_visual(self, visual):
self.limit_transforms[visual] = NestedSTTransform()
self._update_limits()
visual.transform = self.limit_transforms[visual]
self.view.add(visual)
def _update_from_state(self, force=False, **props):
if force or 'visible_axes' in props:
self._toggle_axes()
if force or 'perspective_view' in props:
self._toggle_perspective()
if force or any(key in props for key in ('x_att', 'y_att', 'z_att')):
self._update_attributes()
if force or any(key in props for key in ('x_stretch', 'y_stretch',
'z_stretch', 'native_aspect')):
self._update_stretch()
if force or any(p in props for p in LIMITS_PROPS) or 'native_aspect' in props:
self._update_limits()
self.canvas.update()
def _toggle_axes(self):
if self.viewer_state.visible_axes:
self.axis.parent = self.view.scene
else:
self.axis.parent = None
def _toggle_perspective(self):
if self.viewer_state.perspective_view:
self.view.camera.fov = 30
self.axis.tick_font_size = 28
self.axis.axis_font_size = 35
else:
self.view.camera.fov = 0
self.axis.tick_font_size = 6
self.axis.axis_font_size = 8
def _update_attributes(self):
if self.viewer_state.x_att is not None:
self.axis.xlabel = self.viewer_state.x_att.label
if self.viewer_state.y_att is not None:
self.axis.ylabel = self.viewer_state.y_att.label
if self.viewer_state.z_att is not None:
self.axis.zlabel = self.viewer_state.z_att.label
def _update_stretch(self):
self.scene_transform.scale = (self.viewer_state.x_stretch * self.viewer_state.aspect[0],
self.viewer_state.y_stretch * self.viewer_state.aspect[1],
self.viewer_state.z_stretch * self.viewer_state.aspect[2])
def _update_limits(self):
dx = self.viewer_state.x_max - self.viewer_state.x_min
sx = (np.inf if dx == 0 else 2. / dx *
self.viewer_state.x_stretch * self.viewer_state.aspect[0])
dy = self.viewer_state.y_max - self.viewer_state.y_min
sy = (np.inf if dy == 0 else 2. / dy *
self.viewer_state.y_stretch * self.viewer_state.aspect[1])
dz = self.viewer_state.z_max - self.viewer_state.z_min
sz = (np.inf if dz == 0 else 2. / dz *
self.viewer_state.z_stretch * self.viewer_state.aspect[2])
scale = [sx, sy, sz]
translate = [-0.5 * (self.viewer_state.x_min + self.viewer_state.x_max) * scale[0],
-0.5 * (self.viewer_state.y_min + self.viewer_state.y_max) * scale[1],
-0.5 * (self.viewer_state.z_min + self.viewer_state.z_max) * scale[2]]
for visual in self.limit_transforms:
self.limit_transforms[visual].scale = scale
self.limit_transforms[visual].translate = translate
self.axis.xlim = self.viewer_state.x_min, self.viewer_state.x_max
self.axis.ylim = self.viewer_state.y_min, self.viewer_state.y_max
self.axis.zlim = self.viewer_state.z_min, self.viewer_state.z_max
| {
"repo_name": "astrofrog/glue-3d-viewer",
"path": "glue_vispy_viewers/common/vispy_widget.py",
"copies": "2",
"size": "6339",
"license": "bsd-2-clause",
"hash": -8496800776629161000,
"line_mean": 39.1202531646,
"line_max": 99,
"alpha_frac": 0.5857390756,
"autogenerated": false,
"ratio": 3.7048509643483345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000765557573785789,
"num_lines": 158
} |
from __future__ import absolute_import, division, print_function
import sys
import numpy as np
from ..extern.vispy import scene
from .axes import AxesVisual3D
from matplotlib.colors import ColorConverter
from glue.config import settings
rgb = ColorConverter().to_rgb
LIMITS_PROPS = [coord + attribute for coord in 'xyz' for attribute in ['_min', '_max', '_stretch']]
class VispyWidgetHelper(object):
def __init__(self, parent=None, viewer_state=None):
# Prepare Vispy canvas. We set the depth_size to 24 to avoid issues
# with isosurfaces on MacOS X
self.canvas = scene.SceneCanvas(keys=None, show=False,
config={'depth_size': 24},
bgcolor=rgb(settings.BACKGROUND_COLOR))
# Set up a viewbox
self.view = self.canvas.central_widget.add_view()
self.view.parent = self.canvas.scene
# Set whether we are emulating a 3D texture. This needs to be enabled
# as a workaround on Windows otherwise VisPy crashes.
self.emulate_texture = (sys.platform == 'win32' and
sys.version_info[0] < 3)
self.scene_transform = scene.STTransform()
self.limit_transforms = {}
fc = rgb(settings.FOREGROUND_COLOR)
self.axis = AxesVisual3D(axis_color=fc, tick_color=fc, text_color=fc,
tick_width=1, minor_tick_length=2,
major_tick_length=4, axis_width=0,
tick_label_margin=10, axis_label_margin=25,
tick_font_size=6, axis_font_size=8,
view=self.view,
transform=self.scene_transform)
        # Create a turntable camera. For now, this is the only camera type
        # we support, but if we support more in the future, we should
        # implement that here.
# Orthographic perspective view as default
self.view.camera = scene.cameras.TurntableCamera(parent=self.view.scene,
fov=0., distance=4.0)
# We need to call render here otherwise we'll later encounter an OpenGL
# program validation error.
# self.canvas.render()
self.viewer_state = viewer_state
try:
self.viewer_state.add_callback('*', self._update_from_state, as_kwargs=True)
except TypeError: # glue-core >= 0.11
self.viewer_state.add_global_callback(self._update_from_state)
def _update_appearance_from_settings(self):
self.canvas.bgcolor = rgb(settings.BACKGROUND_COLOR)
self.axis.axis_color = rgb(settings.FOREGROUND_COLOR)
self.axis.tick_color = rgb(settings.FOREGROUND_COLOR)
self.axis.label_color = rgb(settings.FOREGROUND_COLOR)
def add_data_visual(self, visual):
self.limit_transforms[visual] = scene.STTransform()
self._update_limits()
visual.transform = self.limit_transforms[visual]
self.view.add(visual)
def _update_from_state(self, **props):
if 'visible_axes' in props:
self._toggle_axes()
if 'perspective_view' in props:
self._toggle_perspective()
if any(key in props for key in ('x_att', 'y_att', 'z_att')):
self._update_attributes()
if any(key in props for key in ('x_stretch', 'y_stretch', 'z_stretch', 'native_aspect')):
self._update_stretch()
if any(p in props for p in LIMITS_PROPS) or 'native_aspect' in props:
self._update_limits()
self.canvas.update()
def _toggle_axes(self):
if self.viewer_state.visible_axes:
self.axis.parent = self.view.scene
else:
self.axis.parent = None
def _toggle_perspective(self):
if self.viewer_state.perspective_view:
self.view.camera.fov = 30
self.axis.tick_font_size = 28
self.axis.axis_font_size = 35
else:
self.view.camera.fov = 0
self.axis.tick_font_size = 6
self.axis.axis_font_size = 8
def _update_attributes(self):
if self.viewer_state.x_att is not None:
self.axis.xlabel = self.viewer_state.x_att.label
if self.viewer_state.y_att is not None:
self.axis.ylabel = self.viewer_state.y_att.label
if self.viewer_state.z_att is not None:
self.axis.zlabel = self.viewer_state.z_att.label
def _update_stretch(self):
self.scene_transform.scale = (self.viewer_state.x_stretch * self.viewer_state.aspect[0],
self.viewer_state.y_stretch * self.viewer_state.aspect[1],
self.viewer_state.z_stretch * self.viewer_state.aspect[2])
def _update_limits(self):
dx = self.viewer_state.x_max - self.viewer_state.x_min
sx = (np.inf if dx == 0 else 2. / dx *
self.viewer_state.x_stretch * self.viewer_state.aspect[0])
dy = self.viewer_state.y_max - self.viewer_state.y_min
sy = (np.inf if dy == 0 else 2. / dy *
self.viewer_state.y_stretch * self.viewer_state.aspect[1])
dz = self.viewer_state.z_max - self.viewer_state.z_min
sz = (np.inf if dz == 0 else 2. / dz *
self.viewer_state.z_stretch * self.viewer_state.aspect[2])
scale = [sx, sy, sz]
translate = [-0.5 * (self.viewer_state.x_min + self.viewer_state.x_max) * scale[0],
-0.5 * (self.viewer_state.y_min + self.viewer_state.y_max) * scale[1],
-0.5 * (self.viewer_state.z_min + self.viewer_state.z_max) * scale[2]]
for visual in self.limit_transforms:
self.limit_transforms[visual].scale = scale
self.limit_transforms[visual].translate = translate
self.axis.xlim = self.viewer_state.x_min, self.viewer_state.x_max
self.axis.ylim = self.viewer_state.y_min, self.viewer_state.y_max
self.axis.zlim = self.viewer_state.z_min, self.viewer_state.z_max
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/common/vispy_widget.py",
"copies": "1",
"size": "6150",
"license": "bsd-2-clause",
"hash": 8616530923071458000,
"line_mean": 38.6774193548,
"line_max": 99,
"alpha_frac": 0.586504065,
"autogenerated": false,
"ratio": 3.6694510739856803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.475595513898568,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
import psycopg2
from nfldb import connect as nfldb_connect, api_version
from nfldb import Tx, set_timezone
from nfldb.db import _db_name, _mogrify, _bind_type
from nfldb.types import _player_categories, _Enum
from nfldb.team import teams
from nfldbproj.types import ProjEnums
__pdoc__ = {}
nfldbproj_api_version = 1
__pdoc__['nfldbproj_api_version'] = \
"""
The nfldbproj schema version that this library corresponds to. When the schema
version of the database is less than this value, `nfldbproj.connect` will
automatically update the schema to the latest version before doing
anything else.
"""
nfldb_api_version = 7
__pdoc__['nfldb_api_version'] = \
"""
The nfldb schema version that this library corresponds to.
The database will only be updated from this version.
"""
nfldbproj_tables = {
'nfldbproj_meta',
'projection_source',
'fp_system',
'projection_set',
'stat_projection',
'fp_projection',
'dfs_site',
'dfs_salary',
'name_disambiguation',
'fp_score',
'fantasy_player',
}
nfldbproj_types = {
'fantasy_position',
'proj_scope',
'uinteger',
}
def uninstall(db, really_uninstall=False):
"""Remove all traces of nfldb-projections."""
if really_uninstall:
print('Removing all traces of nfldb-projections...', end='')
with Tx(db) as c:
c.execute('DROP TABLE {}'.format(', '.join(nfldbproj_tables)))
c.execute('DROP TYPE {}'.format(', '.join(nfldbproj_types)))
c.execute('DROP FUNCTION add_fantasy_player() CASCADE')
print('done.')
else:
print('Uninstall not executed. Pass keyword argument really_uninstall=True to confirm.')
def connect(**kwargs):
"""
A wrapper around nfldb.connect.
Returns a `psycopg2._psycopg.connection` object from the
`psycopg2.connect` function. If database is `None`, then `connect`
will look for a configuration file using `nfldb.config` with
`config_path`. Otherwise, the connection will use the parameters
given.
If `database` is `None` and no config file can be found, then an
`IOError` exception is raised.
This function will also compare the current schema version of the
database against the API version `nfldb.api_version` and assert
    that they are equivalent. If the schema version is less
    than the API version, then the schema will be automatically
upgraded. If the schema version is newer than the library version,
then this function will raise an assertion error. An assertion
error will also be raised if the schema version is 0 and the
database is not empty.
In addition, a similar updating will be performed for nfldbproj.
N.B. The `timezone` parameter should be set to a value that
PostgreSQL will accept. Select from the `pg_timezone_names` view
to get a list of valid time zones.
"""
conn = nfldb_connect(**kwargs)
# Migration.
nfldbproj_sversion = nfldbproj_schema_version(conn)
assert nfldbproj_sversion <= nfldbproj_api_version, \
'nfldbproj library version {} is older than schema with version {}'.format(
nfldbproj_api_version, nfldbproj_sversion
)
assert api_version == nfldb_api_version, \
'nfldbproj expects nfldb version {}, encountered nfldb version {}'.format(
nfldb_api_version, api_version
)
assert nfldbproj_sversion > 0 or (nfldbproj_sversion == 0 and _nfldbproj_is_empty(conn)), \
'nfldbproj schema has version 0 but is not empty'
set_timezone(conn, 'UTC')
_migrate_nfldbproj(conn, nfldbproj_api_version)
if kwargs.get('timezone'):
set_timezone(conn, kwargs['timezone'])
# Bind SQL -> Python casting functions for additional types.
_bind_type(conn, 'fantasy_position', _Enum._pg_cast(ProjEnums.fantasy_position))
_bind_type(conn, 'proj_scope', _Enum._pg_cast(ProjEnums.proj_scope))
return conn
def nfldbproj_schema_version(conn):
"""
Returns the schema version of the given database. If the version
is not stored in the database, then `0` is returned.
"""
with Tx(conn) as c:
try:
c.execute('SELECT nfldbproj_version FROM nfldbproj_meta LIMIT 1',
['nfldbproj_version'])
except psycopg2.ProgrammingError:
return 0
if c.rowcount == 0:
return 0
return c.fetchone()['nfldbproj_version']
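# Hedged usage sketch (hypothetical helper, not part of the schema code): open
# a connection via ``connect`` above and report the nfldbproj schema version.
# With no arguments, ``connect`` falls back to nfldb's configuration file, so
# this only works against a reachable nfldb database.
def _example_report_schema_version():
    conn = connect()
    try:
        return nfldbproj_schema_version(conn)
    finally:
        conn.close()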
def _nfldbproj_is_empty(conn):
"""
Returns `True` if and only if none of the nfldbproj tables exist in the database.
"""
with Tx(conn) as c:
c.execute('''
SELECT table_name from information_schema.tables
WHERE table_catalog = %s AND table_schema='public'
''', [_db_name(conn)])
table_names = {result['table_name'] for result in c.fetchall()}
# Empty means that no nfldbproj table is present at all; a partially-installed
# schema does not count as empty.
return not (nfldbproj_tables & table_names)
def _category_sql_field(self):
"""
Get a modified SQL definition of a statistical category column.
Unlike in nfldb's tables, we allow NULL statistics,
in order to differentiate between no projection and a projection of zero.
"""
return '{} {} NULL'.format(self.category_id, 'real' if self.is_real else 'smallint')
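# For illustration: a category object with category_id 'passing_yds' and
# is_real False (a hypothetical but typical nfldb category) yields the
# column definition 'passing_yds smallint NULL'; real-valued categories use
# 'real' in place of 'smallint'.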
# What follows are the migration functions. They follow the naming
# convention "_migrate_nfldbproj_{VERSION}" where VERSION is an integer that
# corresponds to the version that the nfldbproj schema will be after the
# migration function runs. Each migration function is only responsible
# for running the queries required to update the schema. It does not
# need to update the schema version.
#
# The migration functions should accept a cursor as a parameter,
# which is created in the _migrate_nfldbproj function. In particular,
# each migration function is run in its own transaction. Commits
# and rollbacks are handled automatically.
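# Illustrative sketch only (not a real migration): a hypothetical version-2
# migration would be named _migrate_nfldbproj_2 and follow the shape below.
# It is deliberately named _sketch_* here so that _migrate_nfldbproj never
# discovers it via globals().
def _sketch_migrate_nfldbproj_2(c):
    # Issue only the DDL/DML for this revision; _migrate_nfldbproj updates
    # nfldbproj_meta.nfldbproj_version after this function returns.
    c.execute('''
        ALTER TABLE projection_source ADD COLUMN source_contact text NULL
    ''')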
def _migrate_nfldbproj(conn, to):
current = nfldbproj_schema_version(conn)
assert current <= to
globs = globals()
for v in range(current+1, to+1):
fname = '_migrate_nfldbproj_{}'.format(v)
with Tx(conn) as c:
assert fname in globs, 'Migration function {} is not defined'.format(fname)
globs[fname](c)
c.execute("UPDATE nfldbproj_meta SET nfldbproj_version = %s", (v,))
def _create_enum(c, enum):
c.execute('''
CREATE TYPE {} AS ENUM {}
'''.format(enum.__name__, _mogrify(c, enum)))
def _migrate_nfldbproj_1(c):
print('Adding nfldb-projections tables to the database...', file=sys.stderr)
_create_enum(c, ProjEnums.fantasy_position)
_create_enum(c, ProjEnums.proj_scope)
c.execute('''
CREATE DOMAIN uinteger AS integer
CHECK (VALUE >= 0)
''')
c.execute('''
CREATE TABLE nfldbproj_meta (
nfldbproj_version smallint
)
''')
c.execute('''
INSERT INTO nfldbproj_meta (nfldbproj_version) VALUES (0)
''')
c.execute('''
CREATE TABLE projection_source (
source_name character varying (100) NOT NULL,
source_url character varying (255) NULL,
source_notes text NULL,
PRIMARY KEY (source_name)
)
''')
c.execute('''
CREATE TABLE fp_system (
fpsys_name character varying (100) NOT NULL,
fpsys_url character varying (255) NULL,
PRIMARY KEY (fpsys_name)
)
''')
# Handle stat projections by allowing them to reference a fantasy-point system "None".
c.execute('''
INSERT INTO fp_system (fpsys_name) VALUES ('None')
''')
c.execute('''
CREATE TABLE fantasy_player (
fantasy_player_id character varying (10) NOT NULL,
player_id character varying (10) NULL,
dst_team character varying (3) NULL,
PRIMARY KEY (fantasy_player_id),
FOREIGN KEY (player_id)
REFERENCES player (player_id)
ON DELETE CASCADE
ON UPDATE CASCADE,
FOREIGN KEY (dst_team)
REFERENCES team (team_id)
ON DELETE CASCADE
ON UPDATE CASCADE,
CHECK (
(player_id IS NULL AND dst_team IS NOT NULL AND fantasy_player_id = dst_team) OR
(player_id IS NOT NULL AND dst_team IS NULL AND fantasy_player_id = player_id)
)
)
''')
c.execute('''
CREATE FUNCTION add_fantasy_player() RETURNS trigger AS $add_fantasy_player$
BEGIN
IF TG_TABLE_NAME = 'player' THEN
INSERT INTO fantasy_player (fantasy_player_id, player_id)
VALUES (NEW.player_id, NEW.player_id);
RETURN NEW;
ELSIF TG_TABLE_NAME = 'team' THEN
INSERT INTO fantasy_player (fantasy_player_id, dst_team)
VALUES (NEW.team_id, NEW.team_id);
INSERT INTO name_disambiguation (name_as_scraped, fantasy_player_id)
VALUES (NEW.team_id, NEW.team_id);
RETURN NEW;
END IF;
END;
$add_fantasy_player$ LANGUAGE plpgsql
''')
c.execute('''
CREATE TRIGGER fantasy_player_mirror_player
AFTER INSERT ON player
FOR EACH ROW
EXECUTE PROCEDURE add_fantasy_player()
''')
c.execute('''
CREATE TRIGGER fantasy_player_mirror_team
AFTER INSERT ON team
FOR EACH ROW
EXECUTE PROCEDURE add_fantasy_player()
''')
c.execute('''
INSERT INTO fantasy_player (fantasy_player_id, player_id)
SELECT player_id, player_id FROM player
''')
c.execute('''
INSERT INTO fantasy_player (fantasy_player_id, dst_team)
SELECT team_id, team_id FROM team
''')
c.execute('''
CREATE TABLE dfs_site (
fpsys_name character varying (100) NOT NULL CHECK (fpsys_name != 'None'),
dfs_name character varying (100) NOT NULL,
dfs_url character varying (255) NOT NULL,
PRIMARY KEY (fpsys_name, dfs_name),
FOREIGN KEY (fpsys_name)
REFERENCES fp_system (fpsys_name)
ON DELETE RESTRICT
)
''')
c.execute('''
CREATE TABLE dfs_salary (
fpsys_name character varying (100) NOT NULL,
dfs_name character varying (100) NOT NULL,
fantasy_player_id character varying (10) NOT NULL,
season_year usmallint NOT NULL,
season_type season_phase NOT NULL,
week usmallint NOT NULL,
salary uinteger NOT NULL,
PRIMARY KEY (fpsys_name, dfs_name, fantasy_player_id, season_year, season_type, week),
FOREIGN KEY (fpsys_name, dfs_name)
REFERENCES dfs_site (fpsys_name, dfs_name)
ON DELETE CASCADE,
FOREIGN KEY (fantasy_player_id)
REFERENCES fantasy_player (fantasy_player_id)
ON DELETE RESTRICT
)
''')
c.execute('''
CREATE TABLE projection_set (
source_name character varying (100) NOT NULL,
fpsys_name character varying (100) NOT NULL,
set_id SERIAL NOT NULL,
projection_scope proj_scope NOT NULL,
season_year usmallint NOT NULL,
season_type season_phase NOT NULL,
week usmallint NULL,
date_accessed utctime NOT NULL DEFAULT (now() AT TIME ZONE 'utc'),
known_incomplete bool NOT NULL DEFAULT FALSE,
PRIMARY KEY (source_name, fpsys_name, set_id),
FOREIGN KEY (source_name)
REFERENCES projection_source (source_name)
ON DELETE CASCADE,
FOREIGN KEY (fpsys_name)
REFERENCES fp_system (fpsys_name)
ON DELETE CASCADE
)
''')
c.execute('''
CREATE INDEX projection_set_in_year_phase_week ON projection_set
(season_year DESC, season_type DESC, week DESC)
''')
c.execute('''
CREATE TABLE stat_projection (
source_name character varying (100) NOT NULL,
fpsys_name character varying (100) NOT NULL CHECK (fpsys_name = 'None'),
set_id SERIAL NOT NULL,
fantasy_player_id character varying (10) NOT NULL,
gsis_id gameid NULL,
team character varying (3) NOT NULL,
fantasy_pos fantasy_position NOT NULL,
{},
PRIMARY KEY (source_name, fpsys_name, set_id, fantasy_player_id),
FOREIGN KEY (source_name)
REFERENCES projection_source (source_name)
ON DELETE CASCADE,
FOREIGN KEY (source_name, fpsys_name, set_id)
REFERENCES projection_set (source_name, fpsys_name, set_id)
ON DELETE CASCADE,
FOREIGN KEY (fantasy_player_id)
REFERENCES fantasy_player (fantasy_player_id)
ON DELETE RESTRICT,
FOREIGN KEY (gsis_id)
REFERENCES game (gsis_id)
ON DELETE RESTRICT,
FOREIGN KEY (team)
REFERENCES team (team_id)
ON DELETE RESTRICT
ON UPDATE CASCADE
)
'''.format(
', '.join(_category_sql_field(cat) for cat in _player_categories.values())
))
c.execute('''
CREATE TABLE fp_projection (
source_name character varying (100) NOT NULL,
fpsys_name character varying (100) NOT NULL CHECK (fpsys_name != 'None'),
set_id usmallint NOT NULL,
fantasy_player_id character varying (10) NOT NULL,
gsis_id gameid NULL,
team character varying (3) NOT NULL,
fantasy_pos fantasy_position NOT NULL,
projected_fp real NOT NULL,
fp_variance real NULL CHECK (fp_variance >= 0),
PRIMARY KEY (source_name, fpsys_name, set_id, fantasy_player_id),
FOREIGN KEY (source_name)
REFERENCES projection_source (source_name)
ON DELETE CASCADE,
FOREIGN KEY (source_name, fpsys_name, set_id)
REFERENCES projection_set (source_name, fpsys_name, set_id)
ON DELETE CASCADE,
FOREIGN KEY (fpsys_name)
REFERENCES fp_system (fpsys_name)
ON DELETE CASCADE,
FOREIGN KEY (fantasy_player_id)
REFERENCES fantasy_player (fantasy_player_id)
ON DELETE RESTRICT,
FOREIGN KEY (gsis_id)
REFERENCES game (gsis_id)
ON DELETE RESTRICT,
FOREIGN KEY (team)
REFERENCES team (team_id)
ON DELETE RESTRICT
ON UPDATE CASCADE
)
''')
c.execute('''
CREATE TABLE fp_score (
fpsys_name character varying (100) NOT NULL CHECK (fpsys_name != 'None'),
gsis_id gameid NOT NULL,
fantasy_player_id character varying (10) NOT NULL,
team character varying (3) NOT NULL,
fantasy_pos fantasy_position NOT NULL,
actual_fp real NOT NULL,
PRIMARY KEY (fpsys_name, gsis_id, fantasy_player_id),
FOREIGN KEY (fpsys_name)
REFERENCES fp_system (fpsys_name)
ON DELETE CASCADE,
FOREIGN KEY (gsis_id)
REFERENCES game (gsis_id)
ON DELETE RESTRICT,
FOREIGN KEY (fantasy_player_id)
REFERENCES fantasy_player (fantasy_player_id)
ON DELETE RESTRICT,
FOREIGN KEY (team)
REFERENCES team (team_id)
ON DELETE RESTRICT
ON UPDATE CASCADE
)
''')
c.execute('''
CREATE TABLE name_disambiguation (
name_as_scraped character varying (100) NOT NULL,
fantasy_player_id character varying (10) NOT NULL,
PRIMARY KEY (name_as_scraped),
FOREIGN KEY (fantasy_player_id)
REFERENCES fantasy_player (fantasy_player_id)
ON DELETE CASCADE
)
''')
# Name disambiguations for all team names.
for team_names in teams:
if team_names[0] == 'UNK':
continue
for team_name in team_names:
if team_name == 'New York': # 'New York' remains ambiguous.
continue
c.execute('''
INSERT INTO name_disambiguation (name_as_scraped, fantasy_player_id)
VALUES (%s, %s)
''', (team_name, team_names[0]))
| {
"repo_name": "hsharrison/nfldb-projections",
"path": "nfldbproj/db.py",
"copies": "1",
"size": "16836",
"license": "bsd-2-clause",
"hash": 5764109098854608000,
"line_mean": 35.3628509719,
"line_max": 98,
"alpha_frac": 0.5936089332,
"autogenerated": false,
"ratio": 3.884633133364098,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4978242066564098,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
"""
Module holding testable units of computation
"""
import logging
_logger = logging.getLogger(__name__)
_logger.addHandler(logging.NullHandler())
class Answer(object):
"""Unit of testable functionality as a class
Tested documentation:
>>> a=Answer(6)
>>> a.retrieve()
42
"""
def __init__(self, answer_portion):
self.part = answer_portion
_logger.debug("The Answer was initialized!")
def retrieve(self):
_logger.debug("The Answer is being computed...")
return 7*self.part
class Fibonacci(object):
"""Unit of testable functionality as an iterator
Tested documentation:
>>> fib=Fibonacci(6,7)
>>> fib.next()
13
"""
def __init__(self, init_first, init_second, max=256):
self.n = init_first
self.m = init_second
self.max = max
_logger.debug("The Fibonacci was initialized with {0} and {1}".format(self.n, self.m))
def __iter__(self):
return self
def next(self):
_logger.debug("The next Fibonacci number is being computed...")
if self.n + self.m > self.max:
# wrap around to avoid overflow
l = 1
self.n = 0
else:
l = self.n + self.m
self.n = self.m
self.m = l
_logger.debug("The next Fibonacci number is {0}".format(l))
return l
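# A minimal sketch for exercising the tested documentation above; the
# project's real test suite may drive these doctests differently.
if __name__ == "__main__":
    import doctest
    doctest.testmod()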
| {
"repo_name": "pyros-dev/ros1_template",
"path": "ros1_pytemplate/ros1_pytemplate/lib_module.py",
"copies": "1",
"size": "1462",
"license": "mit",
"hash": -8578626913949421000,
"line_mean": 23.3666666667,
"line_max": 94,
"alpha_frac": 0.5861833105,
"autogenerated": false,
"ratio": 3.7106598984771573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47968432089771573,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
if PY3:
import builtins
def apply(f, args, **kwargs):
return f(*args, **kwargs)
else:
import __builtin__ as builtins
apply = builtins.apply
# Portions of this taken from the six library, licensed as follows.
#
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from toolz.compatibility import map, zip, range, reduce
if PY2:
_inttypes = (int, long)
unicode = builtins.unicode
basestring = builtins.basestring
_strtypes = (str, unicode)
else:
_inttypes = (int,)
_strtypes = (str,)
unicode = str
basestring = str
import io
try:
SEEK_END = io.SEEK_END
except AttributeError:
SEEK_END = 2
try:
import pytest
skipif = pytest.mark.skipif
xfail = pytest.mark.xfail
min_python_version = skipif(sys.version_info < (2, 7),
reason="Python >= 2.7 required")
raises = pytest.raises
except ImportError:
# TODO: move the above into a separate testing utils module
pass
if sys.version_info >= (2, 7):
from ctypes import c_ssize_t
else:
import ctypes
if ctypes.sizeof(ctypes.c_void_p) == 4:
c_ssize_t = ctypes.c_int32
else:
c_ssize_t = ctypes.c_int64
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/compatibility.py",
"copies": "2",
"size": "2485",
"license": "bsd-3-clause",
"hash": -8998331448645745000,
"line_mean": 26.6111111111,
"line_max": 80,
"alpha_frac": 0.7014084507,
"autogenerated": false,
"ratio": 3.894984326018809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5596392776718808,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
try:
from glue.viewers.common.qt.data_viewer import DataViewer
except ImportError:
from glue.qt.widgets.data_viewer import DataViewer
from glue.core import message as msg
from glue.utils import nonpartial
from glue.core.state import lookup_class_with_patches
from qtpy import PYQT5, QtWidgets
from .vispy_widget import VispyWidgetHelper
from .viewer_options import VispyOptionsWidget
from .toolbar import VispyViewerToolbar
from .viewer_state import Vispy3DViewerState
from .compat import update_viewer_state
# The following detects whether we are using PyQt5 with Anaconda on Linux, which
# is currently broken. Once https://github.com/ContinuumIO/anaconda-issues/issues/1267
# is fixed, we will no longer need BROKEN_CONDA_PYQT5.
BROKEN_CONDA_PYQT5 = (PYQT5 and sys.platform == 'linux' and
'Continuum Analytics' in sys.version)
BROKEN_CONDA_PYQT5_MESSAGE = "The conda version of PyQt5 on Linux does not include OpenGL support, which means that the 3D viewers will not work. The easiest way to solve this is to manually downgrade to PyQt4. This can normally be done with 'conda install pyqt=4'." # noqa
class BaseVispyViewer(DataViewer):
_state_cls = Vispy3DViewerState
_toolbar_cls = VispyViewerToolbar
tools = ['vispy:reset', 'vispy:save', 'vispy:rotate']
# If imageio is available, we can add the record icon
try:
import imageio # noqa
except ImportError:
pass
else:
tools.insert(1, 'vispy:record')
def __init__(self, session, viewer_state=None, parent=None):
super(BaseVispyViewer, self).__init__(session, parent=parent)
self.state = viewer_state or self._state_cls()
if BROKEN_CONDA_PYQT5:
QtWidgets.QMessageBox.critical(self, "Error", BROKEN_CONDA_PYQT5_MESSAGE)
raise Exception(BROKEN_CONDA_PYQT5_MESSAGE)
self._vispy_widget = VispyWidgetHelper(viewer_state=self.state)
self.setCentralWidget(self._vispy_widget.canvas.native)
self._options_widget = VispyOptionsWidget(parent=self, viewer_state=self.state)
self.state.add_callback('clip_data', nonpartial(self._toggle_clip))
self.status_label = None
self.client = None
# When layer artists are removed from the layer artist container, we need
# to make sure we remove matching layer states in the viewer state
# layers attribute.
self._layer_artist_container.on_changed(nonpartial(self._sync_state_layers))
def _sync_state_layers(self):
# Remove layer state objects that no longer have a matching layer
for layer_state in self.state.layers:
if layer_state.layer not in self._layer_artist_container:
self.state.layers.remove(layer_state)
def register_to_hub(self, hub):
super(BaseVispyViewer, self).register_to_hub(hub)
def subset_has_data(x):
return x.sender.data in self._layer_artist_container.layers
def has_data(x):
return x.sender in self._layer_artist_container.layers
hub.subscribe(self, msg.SubsetCreateMessage,
handler=self._add_subset,
filter=subset_has_data)
hub.subscribe(self, msg.SubsetUpdateMessage,
handler=self._update_subset,
filter=subset_has_data)
hub.subscribe(self, msg.SubsetDeleteMessage,
handler=self._remove_subset,
filter=subset_has_data)
hub.subscribe(self, msg.DataUpdateMessage,
handler=self.update_window_title,
filter=has_data)
hub.subscribe(self, msg.NumericalDataChangedMessage,
handler=self._numerical_data_changed,
filter=has_data)
hub.subscribe(self, msg.ComponentsChangedMessage,
handler=self._update_data,
filter=has_data)
def is_appearance_settings(msg):
return ('BACKGROUND_COLOR' in msg.settings or
'FOREGROUND_COLOR' in msg.settings)
hub.subscribe(self, msg.SettingsChangeMessage,
handler=self._update_appearance_from_settings,
filter=is_appearance_settings)
def unregister(self, hub):
super(BaseVispyViewer, self).unregister(hub)
hub.unsubscribe_all(self)
def _update_appearance_from_settings(self, message):
self._vispy_widget._update_appearance_from_settings()
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._data = data
# implemented by concrete object viewers later
def add_data(self, data):
return True
def _add_subset(self, message):
pass
def _update_subset(self, message):
if message.subset in self._layer_artist_container:
for layer_artist in self._layer_artist_container[message.subset]:
layer_artist._update_data()
self._vispy_widget.canvas.update()
def _remove_subset(self, message):
if message.subset in self._layer_artist_container:
self._layer_artist_container.pop(message.subset)
self._vispy_widget.canvas.update()
def _update_data(self, message):
if message.data in self._layer_artist_container:
for layer_artist in self._layer_artist_container[message.data]:
layer_artist._update_data()
def _numerical_data_changed(self, message):
for layer_artist in self._layer_artist_container:
layer_artist._update_data()
def _redraw(self):
self._vispy_widget.canvas.render()
def update_window_title(self, *args):
pass
def options_widget(self):
return self._options_widget
@property
def window_title(self):
return self.LABEL
def __gluestate__(self, context):
return dict(state=self.state.__gluestate__(context),
session=context.id(self._session),
size=self.viewer_size,
pos=self.position,
layers=list(map(context.do, self.layers)),
_protocol=1)
@classmethod
def __setgluestate__(cls, rec, context):
if rec.get('_protocol', 0) < 1:
update_viewer_state(rec, context)
session = context.object(rec['session'])
viewer = cls(session)
viewer.register_to_hub(session.hub)
viewer.viewer_size = rec['size']
x, y = rec['pos']
viewer.move(x=x, y=y)
viewer_state = cls._state_cls.__setgluestate__(rec['state'], context)
viewer.state.update_from_state(viewer_state)
# Restore layer artists
for l in rec['layers']:
cls = lookup_class_with_patches(l.pop('_type'))
layer_state = context.object(l['state'])
layer_artist = cls(viewer, layer_state=layer_state)
viewer._layer_artist_container.append(layer_artist)
return viewer
def show_status(self, text):
if not self.status_label:
statusbar = self.statusBar()
self.status_label = QtWidgets.QLabel()
statusbar.addWidget(self.status_label)
self.status_label.setText(text)
def restore_layers(self, layers, context):
pass
def _toggle_clip(self):
for layer_artist in self._layer_artist_container:
if self.state.clip_data:
layer_artist.set_clip(self.state.clip_limits)
else:
layer_artist.set_clip(None)
if PYQT5:
def show(self):
# WORKAROUND:
# Due to a bug in Qt5, a hidden toolbar in glue causes a grey
# rectangle to be overlaid on top of the glue window. Therefore
# we check if the toolbar is hidden, and if so we make it into a
# floating toolbar temporarily - still hidden, so this will not
# be noticeable to the user.
# tbar.setAllowedAreas(Qt.NoToolBarArea)
from qtpy.QtCore import Qt
tbar = self._session.application._mode_toolbar
hidden = tbar.isHidden()
if hidden:
original_flags = tbar.windowFlags()
tbar.setWindowFlags(Qt.Window | Qt.FramelessWindowHint)
super(BaseVispyViewer, self).show()
if hidden:
tbar.setWindowFlags(original_flags)
tbar.hide()
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/common/vispy_data_viewer.py",
"copies": "1",
"size": "8624",
"license": "bsd-2-clause",
"hash": 4175453687368145400,
"line_mean": 33.3585657371,
"line_max": 274,
"alpha_frac": 0.6247680891,
"autogenerated": false,
"ratio": 3.970534069981584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5095302159081584,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys
print(u"Python", sys.version)
del sys
print(u"Executing pythonista_startup...")
try:
print(u"Importing submodules of pythonista_startup...")
import importlib
import sys
import traceback
# Modify this tuple to include the features you want.
# See the individual files for a full description of each submodule.
for name in (
##"_preflight_hook_experiment",
##"customize_sys_hooks",
##"enable_faulthandler",
"patch_stdstreams",
##"restore_types",
):
try:
importlib.import_module("pythonista_startup." + name).run()
except: # Catch everything
print(u"Exception while running submodule {}:".format(name), file=sys.stderr)
traceback.print_exc()
del importlib
del sys
del traceback
del name
print(u"Done importing submodules of pythonista_startup.")
# To disable globals clearing, comment out the next line.
"""
print(u"Preventing globals clearing...")
import sys
import types
class DirAllTheGlobals(types.ModuleType):
import __main__
def __dir__(self):
return dir(type(self).__main__)
# THESE LINES MUST COME LAST.
# Anything past this point is executed in the context of the old pythonista_startup module, which may already be partially garbage-collected.
new_module = DirAllTheGlobals(__name__, __doc__)
vars(new_module).update(vars(sys.modules["pythonista_startup"]))
sys.modules["pythonista_startup"] = new_module
del sys
del types
del DirAllTheGlobals
del new_module
print(u"Done preventing globals clearing.")
#"""
except: # Catch everything
import sys
import traceback
print(u"Swallowed exception:", file=sys.stderr)
traceback.print_exc()
print(u"Trying to reraise.", file=sys.stderr)
del sys
del traceback
raise
print(u"Done executing pythonista_startup.")
| {
"repo_name": "dgelessus/pythonista_startup",
"path": "__init__.py",
"copies": "1",
"size": "2085",
"license": "mit",
"hash": -815765147664286800,
"line_mean": 26.0779220779,
"line_max": 145,
"alpha_frac": 0.6369304556,
"autogenerated": false,
"ratio": 4.161676646706587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.038739370988970706,
"num_lines": 77
} |
from __future__ import absolute_import, division, print_function
import sys
PY3 = sys.version_info[0] > 2
if PY3:
from urllib.request import urlopen
import builtins as builtins
def apply(f, args):
return f(*args)
else:
from urllib2 import urlopen
import __builtin__ as builtins
apply = apply
import sys
import itertools
# Portions of this taken from the six library, licensed as follows.
#
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
PY2 = sys.version_info[0] == 2
if PY2:
import __builtin__
def dict_iteritems(d):
return d.iteritems()
xrange = __builtin__.xrange
from itertools import izip
unicode = __builtin__.unicode
basestring = __builtin__.basestring
reduce = __builtin__.reduce
_strtypes = (str, unicode)
_inttypes = (int, long)
map = itertools.imap
import urlparse
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
else:
def dict_iteritems(d):
return d.items().__iter__()
xrange = range
izip = zip
_inttypes = (int,)
_strtypes = (str,)
unicode = str
map = map
basestring = str
import urllib.parse as urlparse
from functools import reduce
import builtins
exec_ = getattr(builtins, "exec")
if sys.version_info[:2] >= (2, 7):
from ctypes import c_ssize_t
from unittest import skip, skipIf
else:
import ctypes
if ctypes.sizeof(ctypes.c_void_p) == 4:
c_ssize_t = ctypes.c_int32
else:
c_ssize_t = ctypes.c_int64
from nose.plugins.skip import SkipTest
class skip(object):
def __init__(self, reason):
self.reason = reason
def __call__(self, func):
from nose.plugins.skip import SkipTest
def wrapped(*args, **kwargs):
raise SkipTest("Test %s is skipped because: %s" %
(func.__name__, self.reason))
wrapped.__name__ = func.__name__
return wrapped
class skipIf(object):
def __init__(self, condition, reason):
self.condition = condition
self.reason = reason
def __call__(self, func):
if self.condition:
from nose.plugins.skip import SkipTest
def wrapped(*args, **kwargs):
raise SkipTest("Test %s is skipped because: %s" %
(func.__name__, self.reason))
wrapped.__name__ = func.__name__
return wrapped
else:
return func
| {
"repo_name": "aterrel/blaze",
"path": "blaze/compatibility.py",
"copies": "1",
"size": "3945",
"license": "bsd-3-clause",
"hash": -5046822982824579000,
"line_mean": 31.3360655738,
"line_max": 80,
"alpha_frac": 0.6159695817,
"autogenerated": false,
"ratio": 4.2465016146393975,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5362471196339398,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import sys, os
import argparse
import time
import warnings
import numpy as np
from scipy.constants import speed_of_light
from scipy.stats import cauchy
from astropy.table import Table,Column
import astropy.io.fits as pyfits
import multiprocessing
import healpy
from desiutil.log import get_logger
from desispec.io.util import write_bintable
from desispec.io.fibermap import read_fibermap
from desisim.simexp import reference_conditions
from desisim.templates import SIMQSO, QSO
from desisim.scripts.quickspectra import sim_spectra
from desisim.lya_spectra import read_lya_skewers , apply_lya_transmission, apply_metals_transmission, lambda_RF_LYA
from desisim.dla import dla_spec,insert_dlas
from desisim.bal import BAL
from desisim.io import empty_metatable
from desisim.eboss import FootprintEBOSS, sdss_subsample, RedshiftDistributionEBOSS, sdss_subsample_redshift
from desispec.interpolation import resample_flux
from desimodel.io import load_pixweight
from desimodel import footprint
from speclite import filters
from desitarget.cuts import isQSO_colors
from desiutil.dust import SFDMap, ext_odonnell
try:
c = speed_of_light/1000. #- km/s
except TypeError:
#
# This can happen in documentation builds.
#
c = 299792458.0/1000.0
def parse(options=None):
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Fast simulation of QSO Lya spectra into the final DESI format\
(Spectra class) that can be directly used as an input to the redshift fitter\
(redrock) or correlation function code (picca). The input file is a Lya\
transmission skewer fits file whose format is described in\
https://desi.lbl.gov/trac/wiki/LymanAlphaWG/LyaSpecSim.")
#- Required
parser.add_argument('-i','--infile', type=str, nargs= "*", required=True, help="Input skewer healpix fits file(s)")
parser.add_argument('-o','--outfile', type=str, required=False, help="Output spectra (only used if single input file)")
parser.add_argument('--outdir', type=str, default=".", required=False, help="Output directory")
#- Optional observing conditions to override program defaults
parser.add_argument('--program', type=str, default="DARK", help="Program (DARK, GRAY or BRIGHT)")
parser.add_argument('--seeing', type=float, default=None, help="Seeing FWHM [arcsec]")
parser.add_argument('--airmass', type=float, default=None, help="Airmass")
parser.add_argument('--exptime', type=float, default=None, help="Exposure time [sec]")
parser.add_argument('--moonfrac', type=float, default=None, help="Moon illumination fraction; 1=full")
parser.add_argument('--moonalt', type=float, default=None, help="Moon altitude [degrees]")
parser.add_argument('--moonsep', type=float, default=None, help="Moon separation to tile [degrees]")
parser.add_argument('--seed', type=int, default=None, required = False, help="Global random seed (will be used to generate a seed for each input file)")
parser.add_argument('--skyerr', type=float, default=0.0, help="Fractional sky subtraction error")
parser.add_argument('--downsampling', type=float, default=None,help="fractional random down-sampling (value between 0 and 1)")
parser.add_argument('--zmin', type=float, default=0, help="Min redshift")
parser.add_argument('--zmax', type=float, default=10, help="Max redshift")
parser.add_argument('--wmin', type=float, default=3500, help="Min wavelength (obs. frame)")
parser.add_argument('--wmax', type=float, default=10000, help="Max wavelength (obs. frame)")
parser.add_argument('--dwave', type=float, default=0.2, help="Internal wavelength step (don't change this)")
parser.add_argument('--dwave_desi', type=float, default=0.8, help="Output wavelength step for DESI mocks")
parser.add_argument('--zbest', action = "store_true", help="add a zbest file per spectrum either with the truth\
redshift or with some added error (optionally use it with --sigma_kms_fog and/or --gamma_kms_zfit)")
parser.add_argument('--sigma_kms_fog',type=float,default=150, help="Adds a gaussian error to the quasar \
redshift that simulates the Fingers-of-God effect")
parser.add_argument('--gamma_kms_zfit',nargs='?',type=float,const=400,help="Adds a Lorentzian distributed shift\
to the quasar redshift, to simulate the redshift fitting step. E.g. --gamma_kms_zfit 400 will use a gamma \
parameter of 400 km/s . If a number is not specified, a value of 400 is used.")
parser.add_argument('--shift_kms_los',type=float,default=0,help="Adds a shift to the quasar redshift written in\
the zbest file (in km/s)")
parser.add_argument('--target-selection', action = "store_true" ,help="apply QSO target selection cuts to the simulated quasars")
parser.add_argument('--mags', action = "store_true", help="DEPRECATED; use --bbflux")
parser.add_argument('--bbflux', action = "store_true", help="compute and write the QSO broad-band fluxes in the fibermap")
parser.add_argument('--add-LYB', action='store_true', help = "Add LYB absorption from the transmission file")
parser.add_argument('--metals', type=str, default=None, required=False, help = "list of metals to get the\
transmission from, if 'all' runs on all metals", nargs='*')
#parser.add_argument('--metals-from-file', action = 'store_true', help = "add metals from HDU in file")
parser.add_argument('--metals-from-file',type=str,const='all',help = "list of metals,'SI1260,SI1207' etc, to get from HDUs in file. \
Use 'all' or no argument for mock version < 7.3 or final metal runs. ",nargs='?')
parser.add_argument('--dla',type=str,required=False, help="Add DLA to simulated spectra either randomly \
(--dla random) or from the transmission file (--dla file)")
parser.add_argument('--balprob',type=float,required=False, help="To add BAL features with the specified probability \
(e.g. --balprob 0.5). Expects a number between 0 and 1")
parser.add_argument('--no-simqso',action = "store_true", help="Does not use desisim.templates.SIMQSO\
to generate templates, and uses desisim.templates.QSO instead.")
parser.add_argument('--save-continuum',action = "store_true", help="Save the true continuum to file")
parser.add_argument('--save-continuum-dwave',type=float, default=2, help="Delta wavelength used to save the true continuum")
parser.add_argument('--desi-footprint', action = "store_true" ,help="select QSOs in DESI footprint")
parser.add_argument('--eboss',action = 'store_true', help='Setup footprint, number density, redshift distribution,\
and exposure time to generate eBOSS-like mocks')
parser.add_argument('--extinction',action='store_true',help='Adds Galactic extinction')
parser.add_argument('--no-transmission',action = 'store_true', help='Do not multiply continuum\
by transmission, use F=1 everywhere')
parser.add_argument('--nproc', type=int, default=1,help="number of parallel processes to use")
parser.add_argument('--overwrite', action = "store_true" ,help="rerun even if the spectra file exists (default is to skip)")
parser.add_argument('--nmax', type=int, default=None, help="Max number of QSO per input file, for debugging")
parser.add_argument('--save-resolution',action='store_true', help="Save full resolution in spectra file. By default only one matrix is saved in the truth file.")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
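# A usage sketch (hypothetical file names; assumes desisim's quickquasars
# entry point is on the PATH), using only options defined above:
#
#     quickquasars -i transmission-16-1234.fits --outdir spectra-16 \
#         --zbest --bbflux --dla file --metals-from-file --seed 42
#
# or, equivalently, from Python:
#
#     from desisim.scripts.quickquasars import main
#     main(['-i', 'transmission-16-1234.fits', '--outdir', 'spectra-16',
#           '--zbest', '--bbflux', '--seed', '42'])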
def mod_cauchy(loc,scale,size,cut):
samples=cauchy.rvs(loc=loc,scale=scale,size=3*size)
samples=samples[abs(samples)<cut]
if len(samples)>=size:
samples=samples[:size]
else:
samples=mod_cauchy(loc,scale,size,cut) ##Only added for the very unlikely case that there are not enough samples after the cut.
return samples
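# Note: this draws from a Cauchy (Lorentzian) distribution truncated at
# |x| < cut; it is used further down to model the statistical redshift
# error of the redshift fitter when --gamma_kms_zfit is set.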
def get_spectra_filename(args,nside,pixel):
if args.outfile :
return args.outfile
filename="{}/{}/spectra-{}-{}.fits".format(pixel//100,pixel,nside,pixel)
return os.path.join(args.outdir,filename)
def get_zbest_filename(args,pixdir,nside,pixel):
if args.zbest :
return os.path.join(pixdir,"zbest-{}-{}.fits".format(nside,pixel))
return None
def get_truth_filename(args,pixdir,nside,pixel):
return os.path.join(pixdir,"truth-{}-{}.fits".format(nside,pixel))
def is_south(dec):
"""Identify which QSOs are in the south vs the north, since these are on
different photometric systems. See
https://github.com/desihub/desitarget/issues/353 for details.
"""
return dec <= 32.125 # constant-declination cut!
def get_healpix_info(ifilename):
"""Read the header of the tranmission file to find the healpix pixel, nside
and if we are lucky the scheme. If it fails, try to guess it from the
filename (for backward compatibility).
Args:
ifilename: full path to input transmission file
Returns:
healpix: HEALPix pixel corresponding to the file
nside: HEALPix nside value
hpxnest: Whether HEALPix scheme in the file was nested
"""
log = get_logger()
print('ifilename',ifilename)
healpix=-1
nside=-1
hpxnest=True
hdulist=pyfits.open(ifilename)
if "METADATA" in hdulist :
head=hdulist["METADATA"].header
for k in ["HPXPIXEL","PIXNUM"] :
if k in head :
healpix=int(head[k])
log.info("healpix={}={}".format(k,healpix))
break
for k in ["HPXNSIDE","NSIDE"] :
if k in head :
nside=int(head[k])
log.info("nside={}={}".format(k,nside))
break
for k in ["HPXNEST","NESTED","SCHEME"] :
if k in head :
if k == "SCHEME" :
hpxnest=(head[k]=="NEST")
else :
hpxnest=bool(head[k])
log.info("hpxnest from {} = {}".format(k,hpxnest))
break
if healpix >= 0 and nside < 0 :
log.error("Read healpix in header but not nside.")
raise ValueError("Read healpix in header but not nside.")
if healpix < 0 :
vals = os.path.basename(ifilename).split(".")[0].split("-")
if len(vals)<3 :
error_msg="Could not guess healpix info from {}".format(ifilename)
log.error(error_msg)
raise ValueError(error_msg)
try :
healpix=int(vals[-1])
nside=int(vals[-2])
except ValueError:
error_msg="Could not guess healpix info from {}".format(ifilename)
log.error(error_msg)
raise ValueError(error_msg)
log.warning("Guessed healpix and nside from filename, assuming the healpix scheme is 'NESTED'")
return healpix, nside, hpxnest
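# For example, when the header carries no HEALPix keywords, a file named
# transmission-16-1234.fits (hypothetical) is parsed as nside=16 and
# healpix=1234, and the scheme is assumed to be NESTED.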
def get_pixel_seed(pixel, nside, global_seed):
if global_seed is None:
# return a random seed
return np.random.randint(2**32, size=1)[0]
npix=healpy.nside2npix(nside)
np.random.seed(global_seed)
seeds = np.unique(np.random.randint(2**32, size=10*npix))[:npix]
pixel_seed = seeds[pixel]
return pixel_seed
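# Note: for a fixed global seed, each healpix pixel receives a distinct,
# reproducible seed (the seed table is drawn once from the global seed and
# indexed by pixel), so re-running the same pixel reproduces its spectra.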
def simulate_one_healpix(ifilename,args,model,obsconditions,decam_and_wise_filters,
bassmzls_and_wise_filters,footprint_healpix_weight,
footprint_healpix_nside,
bal=None,sfdmap=None,eboss=None) :
log = get_logger()
# open filename and extract basic HEALPix information
pixel, nside, hpxnest = get_healpix_info(ifilename)
# using global seed (could be None) get seed for this particular pixel
global_seed = args.seed
seed = get_pixel_seed(pixel, nside, global_seed)
# use this seed to generate future random numbers
np.random.seed(seed)
# get output file (we will write there spectra for this HEALPix pixel)
ofilename = get_spectra_filename(args,nside,pixel)
# get directory name (we will also write there zbest file)
pixdir = os.path.dirname(ofilename)
# get filename for truth file
truth_filename = get_truth_filename(args,pixdir,nside,pixel)
# get filename for zbest file
zbest_filename = get_zbest_filename(args,pixdir,nside,pixel)
if not args.overwrite :
# check whether output exists or not
if args.zbest :
if os.path.isfile(ofilename) and os.path.isfile(zbest_filename) :
log.info("skip existing {} and {}".format(ofilename,zbest_filename))
return
else : # only test spectra file
if os.path.isfile(ofilename) :
log.info("skip existing {}".format(ofilename))
return
# create sub-directories if required
if len(pixdir)>0 :
if not os.path.isdir(pixdir) :
log.info("Creating dir {}".format(pixdir))
os.makedirs(pixdir)
if not eboss is None:
dwave_out = None
else:
dwave_out = args.dwave_desi
log.info("Read skewers in {}, random seed = {}".format(ifilename,seed))
# Read transmission from files. It might include DLA information, and it
# might add metal transmission as well (from the HDU file).
log.info("Read transmission file {}".format(ifilename))
trans_wave, transmission, metadata, dla_info = read_lya_skewers(ifilename,read_dlas=(args.dla=='file'),add_metals=args.metals_from_file,add_lyb=args.add_LYB)
### Add Fingers-of-God, before generating the continua
log.info("Add FOG with sigma {} km/s to the quasar redshifts".format(args.sigma_kms_fog))
DZ_FOG = args.sigma_kms_fog/c*(1.+metadata['Z'])*np.random.normal(0,1,metadata['Z'].size)
metadata['Z'] += DZ_FOG
### Select quasar within a given redshift range
w = (metadata['Z']>=args.zmin) & (metadata['Z']<=args.zmax)
transmission = transmission[w]
metadata = metadata[:][w]
DZ_FOG = DZ_FOG[w]
# options specific to BOSS+eBOSS mocks
if not eboss is None:
if args.downsampling or args.desi_footprint:
raise ValueError("eboss option can not be run with "
+"desi_footprint or downsampling")
# Get the redshift distribution from SDSS
selection = sdss_subsample_redshift(metadata["RA"],metadata["DEC"],metadata['Z'],eboss['redshift'])
log.info("Select QSOs in BOSS+eBOSS redshift distribution {} -> {}".format(metadata['Z'].size,selection.sum()))
if selection.sum()==0:
log.warning("No intersection with BOSS+eBOSS redshift distribution")
return
transmission = transmission[selection]
metadata = metadata[:][selection]
DZ_FOG = DZ_FOG[selection]
# figure out the density of all quasars
N_highz = metadata['Z'].size
# area of a healpix pixel, in square degrees
area_deg2 = healpy.pixelfunc.nside2pixarea(nside,degrees=True)
input_highz_dens_deg2 = N_highz/area_deg2
selection = sdss_subsample(metadata["RA"], metadata["DEC"],
input_highz_dens_deg2,eboss['footprint'])
log.info("Select QSOs in BOSS+eBOSS footprint {} -> {}".format(transmission.shape[0],selection.size))
if selection.size == 0 :
log.warning("No intersection with BOSS+eBOSS footprint")
return
transmission = transmission[selection]
metadata = metadata[:][selection]
DZ_FOG = DZ_FOG[selection]
if args.desi_footprint :
footprint_healpix = footprint.radec2pix(footprint_healpix_nside, metadata["RA"], metadata["DEC"])
selection = np.where(footprint_healpix_weight[footprint_healpix]>0.99)[0]
log.info("Select QSOs in DESI footprint {} -> {}".format(transmission.shape[0],selection.size))
if selection.size == 0 :
log.warning("No intersection with DESI footprint")
return
transmission = transmission[selection]
metadata = metadata[:][selection]
DZ_FOG = DZ_FOG[selection]
nqso=transmission.shape[0]
if args.downsampling is not None :
if args.downsampling <= 0 or args.downsampling > 1 :
log.error("Down sampling fraction={} must be between 0 and 1".format(args.downsampling))
raise ValueError("Down sampling fraction={} must be between 0 and 1".format(args.downsampling))
indices = np.where(np.random.uniform(size=nqso)<args.downsampling)[0]
if indices.size == 0 :
log.warning("Down sampling from {} to 0 (by chance I presume)".format(nqso))
return
transmission = transmission[indices]
metadata = metadata[:][indices]
DZ_FOG = DZ_FOG[indices]
nqso = transmission.shape[0]
if args.nmax is not None :
if args.nmax < nqso :
log.info("Limit number of QSOs from {} to nmax={} (random subsample)".format(nqso,args.nmax))
# take a random subsample
indices = np.random.choice(np.arange(nqso),args.nmax,replace=False)
transmission = transmission[indices]
metadata = metadata[:][indices]
DZ_FOG = DZ_FOG[indices]
nqso = args.nmax
# In previous versions of the London mocks we needed to enforce F=1 for
# z > z_qso here, but this is not needed anymore. Moreover, now we also
# have metal absorption that implies F < 1 for z > z_qso
#for ii in range(len(metadata)):
# transmission[ii][trans_wave>lambda_RF_LYA*(metadata[ii]['Z']+1)]=1.0
# if requested, add DLA to the transmission skewers
if args.dla is not None :
# if adding random DLAs, we will need a new random generator
if args.dla=='random':
log.info('Adding DLAs randomly')
random_state_just_for_dlas = np.random.RandomState(seed)
elif args.dla=='file':
log.info('Adding DLAs from transmission file')
else:
log.error("Wrong option for args.dla: "+args.dla)
sys.exit(1)
# if adding DLAs, the information will be printed here
dla_filename=os.path.join(pixdir,"dla-{}-{}.fits".format(nside,pixel))
dla_NHI, dla_z, dla_qid,dla_id = [], [], [],[]
# identify minimum Lya redshift in transmission files
min_lya_z = np.min(trans_wave/lambda_RF_LYA - 1)
# loop over quasars in pixel
for ii in range(len(metadata)):
# quasars with z < min_z will not have any DLA in spectrum
if min_lya_z>metadata['Z'][ii]: continue
# quasar ID
idd=metadata['MOCKID'][ii]
dlas=[]
if args.dla=='file':
for dla in dla_info[dla_info['MOCKID']==idd]:
# Adding only DLAs with z < zqso
if dla['Z_DLA_RSD']>=metadata['Z'][ii]: continue
dlas.append(dict(z=dla['Z_DLA_RSD'],N=dla['N_HI_DLA'],dlaid=dla['DLAID']))
transmission_dla = dla_spec(trans_wave,dlas)
elif args.dla=='random':
dlas, transmission_dla = insert_dlas(trans_wave, metadata['Z'][ii], rstate=random_state_just_for_dlas)
for idla in dlas:
idla['dlaid']+=idd*1000 #Added to have unique DLA ids. Same format as DLAs from file.
# multiply transmissions and store information for the DLA file
if len(dlas)>0:
transmission[ii] = transmission_dla * transmission[ii]
dla_z += [idla['z'] for idla in dlas]
dla_NHI += [idla['N'] for idla in dlas]
dla_id += [idla['dlaid'] for idla in dlas]
dla_qid += [idd]*len(dlas)
log.info('Added {} DLAs'.format(len(dla_id)))
# write file with DLA information
if len(dla_id)>0:
dla_meta=Table()
dla_meta['NHI'] = dla_NHI
dla_meta['Z_DLA'] = dla_z #This is Z_DLA_RSD in the transmission file.
dla_meta['TARGETID']=dla_qid
dla_meta['DLAID'] = dla_id
hdu_dla = pyfits.convenience.table_to_hdu(dla_meta)
hdu_dla.name="DLA_META"
del(dla_meta)
log.info("DLA metadata to be saved in {}".format(truth_filename))
else:
hdu_dla=pyfits.PrimaryHDU()
hdu_dla.name="DLA_META"
# if requested, extend transmission skewers to cover full spectrum
if args.target_selection or args.bbflux :
wanted_min_wave = 3329. # needed to compute magnitudes for decam2014-r (one could have trimmed the transmission file ...)
wanted_max_wave = 55501. # needed to compute magnitudes for wise2010-W2
if trans_wave[0]>wanted_min_wave :
log.info("Increase wavelength range from {}:{} to {}:{} to compute magnitudes".format(int(trans_wave[0]),int(trans_wave[-1]),int(wanted_min_wave),int(trans_wave[-1])))
# pad with ones at short wavelength, we assume F = 1 for z <~ 1.7
# we don't need any wavelength resolution here
new_trans_wave = np.append([wanted_min_wave,trans_wave[0]-0.01],trans_wave)
new_transmission = np.ones((transmission.shape[0],new_trans_wave.size))
new_transmission[:,2:] = transmission
trans_wave = new_trans_wave
transmission = new_transmission
if trans_wave[-1]<wanted_max_wave :
log.info("Increase wavelength range from {}:{} to {}:{} to compute magnitudes".format(int(trans_wave[0]),int(trans_wave[-1]),int(trans_wave[0]),int(wanted_max_wave)))
# pad with ones at long wavelength because we assume F = 1
coarse_dwave = 2. # we don't care about resolution, we just need a decent QSO spectrum, there is no IGM transmission in this range
n = int((wanted_max_wave-trans_wave[-1])/coarse_dwave)+1
new_trans_wave = np.append(trans_wave,np.linspace(trans_wave[-1]+coarse_dwave,trans_wave[-1]+coarse_dwave*(n+1),n))
new_transmission = np.ones((transmission.shape[0],new_trans_wave.size))
new_transmission[:,:trans_wave.size] = transmission
trans_wave = new_trans_wave
transmission = new_transmission
# whether to use QSO or SIMQSO to generate quasar continua. Simulate
# spectra in the north vs south separately because they're on different
# photometric systems.
south = np.where( is_south(metadata['DEC']) )[0]
north = np.where( ~is_south(metadata['DEC']) )[0]
meta, qsometa = empty_metatable(nqso, objtype='QSO', simqso=not args.no_simqso)
if args.no_simqso:
log.info("Simulate {} QSOs with QSO templates".format(nqso))
tmp_qso_flux = np.zeros([nqso, len(model.eigenwave)], dtype='f4')
tmp_qso_wave = np.zeros_like(tmp_qso_flux)
else:
log.info("Simulate {} QSOs with SIMQSO templates".format(nqso))
tmp_qso_flux = np.zeros([nqso, len(model.basewave)], dtype='f4')
tmp_qso_wave = model.basewave
for these, issouth in zip( (north, south), (False, True) ):
# number of quasars in these
nt = len(these)
if nt<=0: continue
if not eboss is None:
# for eBOSS, generate only quasars with r<22
magrange = (17.0, 21.3)
_tmp_qso_flux, _tmp_qso_wave, _meta, _qsometa \
= model.make_templates(nmodel=nt,
redshift=metadata['Z'][these], magrange=magrange,
lyaforest=False, nocolorcuts=True,
noresample=True, seed=seed, south=issouth)
else:
_tmp_qso_flux, _tmp_qso_wave, _meta, _qsometa \
= model.make_templates(nmodel=nt,
redshift=metadata['Z'][these],
lyaforest=False, nocolorcuts=True,
noresample=True, seed=seed, south=issouth)
_meta['TARGETID'] = metadata['MOCKID'][these]
_qsometa['TARGETID'] = metadata['MOCKID'][these]
meta[these] = _meta
qsometa[these] = _qsometa
tmp_qso_flux[these, :] = _tmp_qso_flux
if args.no_simqso:
tmp_qso_wave[these, :] = _tmp_qso_wave
log.info("Resample to transmission wavelength grid")
qso_flux=np.zeros((tmp_qso_flux.shape[0],trans_wave.size))
if args.no_simqso:
for q in range(tmp_qso_flux.shape[0]) :
qso_flux[q]=np.interp(trans_wave,tmp_qso_wave[q],tmp_qso_flux[q])
else:
for q in range(tmp_qso_flux.shape[0]) :
qso_flux[q]=np.interp(trans_wave,tmp_qso_wave,tmp_qso_flux[q])
tmp_qso_flux = qso_flux
tmp_qso_wave = trans_wave
if args.save_continuum :
true_wave=np.linspace(args.wmin,args.wmax,int((args.wmax-args.wmin)/args.save_continuum_dwave)+1)
true_flux=np.zeros((tmp_qso_flux.shape[0],true_wave.size))
for q in range(tmp_qso_flux.shape[0]) :
true_flux[q]=resample_flux(true_wave,tmp_qso_wave,tmp_qso_flux[q])
continum_meta=Table()
continum_meta['TARGETID'] = qsometa['TARGETID']
continum_meta['TRUE_CONT'] = true_flux
hdu_trueCont = pyfits.convenience.table_to_hdu(continum_meta)
hdu_trueCont.name = "TRUE_CONT"
hdu_trueCont.header['wmin'] = args.wmin
hdu_trueCont.header['wmax'] = args.wmax
hdu_trueCont.header['dwave'] = args.save_continuum_dwave
del(continum_meta,true_wave,true_flux)
log.info("True continum to be saved in {}".format(truth_filename))
# if requested, add BAL features to the quasar continua
if args.balprob:
if args.balprob <= 1. and args.balprob > 0:
from desisim.io import find_basis_template
log.info("Adding BALs with probability {}".format(args.balprob))
# save current random state
rnd_state = np.random.get_state()
tmp_qso_flux,meta_bal = bal.insert_bals(tmp_qso_wave, tmp_qso_flux, metadata['Z'],
balprob= args.balprob, seed=seed, qsoid=metadata['MOCKID'])
# restore random state to get the same random numbers later
# as when we don't insert BALs
np.random.set_state(rnd_state)
w = np.in1d(qsometa['TARGETID'], meta_bal['TARGETID'])
qsometa['BAL_TEMPLATEID'][w] = meta_bal['BAL_TEMPLATEID']
hdu_bal=pyfits.convenience.table_to_hdu(meta_bal); hdu_bal.name="BAL_META"
#Trim to only show the version, assuming it is located in os.environ['DESI_BASIS_TEMPLATES']
hdu_bal.header["BALTEMPL"]=find_basis_template(objtype='BAL').split('basis_templates/')[1]
del meta_bal
else:
balstr=str(args.balprob)
log.error("BAL probability is not between 0 and 1 : "+balstr)
sys.exit(1)
# Multiply quasar continua by transmitted flux fraction
# (at this point transmission file might include Ly-beta, metals and DLAs)
log.info("Apply transmitted flux fraction")
if not args.no_transmission:
tmp_qso_flux = apply_lya_transmission(tmp_qso_wave,tmp_qso_flux,
trans_wave,transmission)
# if requested, compute metal transmission on the fly
# (if not included already from the transmission file)
if args.metals is not None:
if args.metals_from_file :
log.error('you cannot add metals twice')
raise ValueError('you cannot add metals twice')
if args.no_transmission:
log.error('you cannot add metals if asking for no-transmission')
raise ValueError('can not add metals if using no-transmission')
lstMetals = ''
for m in args.metals: lstMetals += m+', '
log.info("Apply metals: {}".format(lstMetals[:-2]))
tmp_qso_flux = apply_metals_transmission(tmp_qso_wave,tmp_qso_flux,
trans_wave,transmission,args.metals)
# if requested, compute magnitudes and apply target selection. Need to do
# this calculation separately for QSOs in the north vs south.
bbflux=None
if args.target_selection or args.bbflux :
bands=['FLUX_G','FLUX_R','FLUX_Z', 'FLUX_W1', 'FLUX_W2']
bbflux=dict()
bbflux['SOUTH'] = is_south(metadata['DEC'])
for band in bands:
bbflux[band] = np.zeros(nqso)
# need to recompute the magnitudes to account for lya transmission
log.info("Compute QSO magnitudes")
for these, filters in zip( (~bbflux['SOUTH'], bbflux['SOUTH']),
(bassmzls_and_wise_filters, decam_and_wise_filters) ):
if np.count_nonzero(these) > 0:
maggies = filters.get_ab_maggies(1e-17 * tmp_qso_flux[these, :], tmp_qso_wave)
for band, filt in zip( bands, maggies.colnames ):
bbflux[band][these] = np.ma.getdata(1e9 * maggies[filt]) # nanomaggies
if args.target_selection :
log.info("Apply target selection")
isqso = np.ones(nqso, dtype=bool)
for these, issouth in zip( (~bbflux['SOUTH'], bbflux['SOUTH']), (False, True) ):
if np.count_nonzero(these) > 0:
# optical cuts only if using QSO vs SIMQSO
isqso[these] &= isQSO_colors(gflux=bbflux['FLUX_G'][these],
rflux=bbflux['FLUX_R'][these],
zflux=bbflux['FLUX_Z'][these],
w1flux=bbflux['FLUX_W1'][these],
w2flux=bbflux['FLUX_W2'][these],
south=issouth, optical=args.no_simqso)
log.info("Target selection: {}/{} QSOs selected".format(np.sum(isqso),nqso))
selection=np.where(isqso)[0]
if selection.size==0 : return
tmp_qso_flux = tmp_qso_flux[selection]
metadata = metadata[:][selection]
meta = meta[:][selection]
qsometa = qsometa[:][selection]
DZ_FOG = DZ_FOG[selection]
for band in bands :
bbflux[band] = bbflux[band][selection]
bbflux['SOUTH']=bbflux['SOUTH'][selection]
nqso = selection.size
log.info("Resample to a linear wavelength grid (needed by DESI sim.)")
# careful integration of bins, not just a simple interpolation
qso_wave=np.linspace(args.wmin,args.wmax,int((args.wmax-args.wmin)/args.dwave)+1)
qso_flux=np.zeros((tmp_qso_flux.shape[0],qso_wave.size))
for q in range(tmp_qso_flux.shape[0]) :
qso_flux[q]=resample_flux(qso_wave,tmp_qso_wave,tmp_qso_flux[q])
log.info("Simulate DESI observation and write output file")
if "MOCKID" in metadata.dtype.names :
#log.warning("Using MOCKID as TARGETID")
targetid=np.array(metadata["MOCKID"]).astype(int)
elif "ID" in metadata.dtype.names :
log.warning("Using ID as TARGETID")
targetid=np.array(metadata["ID"]).astype(int)
else :
log.warning("No TARGETID")
targetid=None
specmeta={"HPXNSIDE":nside,"HPXPIXEL":pixel, "HPXNEST":hpxnest}
if args.target_selection or args.bbflux :
fibermap_columns = dict(
FLUX_G = bbflux['FLUX_G'],
FLUX_R = bbflux['FLUX_R'],
FLUX_Z = bbflux['FLUX_Z'],
FLUX_W1 = bbflux['FLUX_W1'],
FLUX_W2 = bbflux['FLUX_W2'],
)
photsys = np.full(len(bbflux['FLUX_G']), 'N', dtype='S1')
photsys[bbflux['SOUTH']] = b'S'
fibermap_columns['PHOTSYS'] = photsys
else :
fibermap_columns=None
# Attenuate the spectra for extinction
if not sfdmap is None:
Rv=3.1 #set by default
indx=np.arange(metadata['RA'].size)
extinction =Rv*ext_odonnell(qso_wave)
EBV = sfdmap.ebv(metadata['RA'],metadata['DEC'], scaling=1.0)
qso_flux *=10**( -0.4 * EBV[indx, np.newaxis] * extinction)
if fibermap_columns is not None:
fibermap_columns['EBV']=EBV
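# Boost the exposure time to compensate for the median extinction: Ag is
# (presumably) the g-band extinction in magnitudes for the median E(B-V),
# and scaling EXPTIME by 10**(2*Ag/2.5) offsets the 10**(-0.4*Ag) flux
# attenuation in the sky-limited signal-to-noise regime.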
EBV0=0.0
EBV_med=np.median(EBV)
Ag = 3.303 * (EBV_med - EBV0)
exptime_fact=np.power(10.0, (2.0 * Ag / 2.5))
obsconditions['EXPTIME']*=exptime_fact
log.info("Dust extinction added")
log.info('exposure time adjusted to {}'.format(obsconditions['EXPTIME']))
if args.eboss:
specsim_config_file = 'eboss'
else:
specsim_config_file = 'desi'
### use Poisson = False to get reproducible results.
### use args.save_resolution = False to not save the matrix resolution per quasar in spectra files.
resolution=sim_spectra(qso_wave,qso_flux, args.program, obsconditions=obsconditions,spectra_filename=ofilename,
sourcetype="qso", skyerr=args.skyerr,ra=metadata["RA"],dec=metadata["DEC"],targetid=targetid,
meta=specmeta,seed=seed,fibermap_columns=fibermap_columns,use_poisson=False,
specsim_config_file=specsim_config_file, dwave_out=dwave_out, save_resolution=args.save_resolution)
### Keep input redshift
Z_spec = metadata['Z'].copy()
Z_input = metadata['Z'].copy()-DZ_FOG
### Add a shift to the redshift, simulating the systematic imprecision of redrock
DZ_sys_shift = args.shift_kms_los/c*(1.+Z_input)
log.info('Added a shift of {} km/s to the redshift'.format(args.shift_kms_los))
meta['REDSHIFT'] += DZ_sys_shift
metadata['Z'] += DZ_sys_shift
### Add a shift to the redshift, simulating the statistical imprecision of redrock
if args.gamma_kms_zfit:
log.info("Added zfit error with gamma {} to zbest".format(args.gamma_kms_zfit))
DZ_stat_shift = mod_cauchy(loc=0,scale=args.gamma_kms_zfit,size=nqso,cut=3000)/c*(1.+Z_input)
meta['REDSHIFT'] += DZ_stat_shift
metadata['Z'] += DZ_stat_shift
## Write the truth file, including metadata for DLAs and BALs
log.info('Writing a truth file {}'.format(truth_filename))
meta.rename_column('REDSHIFT','Z')
meta.add_column(Column(Z_spec,name='TRUEZ'))
meta.add_column(Column(Z_input,name='Z_INPUT'))
meta.add_column(Column(DZ_FOG,name='DZ_FOG'))
meta.add_column(Column(DZ_sys_shift,name='DZ_SYS'))
if args.gamma_kms_zfit:
meta.add_column(Column(DZ_stat_shift,name='DZ_STAT'))
if 'Z_noRSD' in metadata.dtype.names:
meta.add_column(Column(metadata['Z_noRSD'],name='Z_NORSD'))
else:
log.info('Z_noRSD field not present in transmission file. Z_NORSD not saved to truth file')
#Save global seed and pixel seed to primary header
hdr=pyfits.Header()
hdr['GSEED']=global_seed
hdr['PIXSEED']=seed
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*nanomaggies.*")
hdu = pyfits.convenience.table_to_hdu(meta)
hdu.header['EXTNAME'] = 'TRUTH'
hduqso=pyfits.convenience.table_to_hdu(qsometa)
hduqso.header['EXTNAME'] = 'TRUTH_QSO'
hdulist=pyfits.HDUList([pyfits.PrimaryHDU(header=hdr),hdu,hduqso])
if args.dla :
hdulist.append(hdu_dla)
if args.balprob :
hdulist.append(hdu_bal)
if args.save_continuum :
hdulist.append(hdu_trueCont)
# Save one resolution matrix per camera to the truth file instead of one per quasar to the spectra files.
if not args.save_resolution:
for band in resolution.keys():
hdu = pyfits.ImageHDU(name="{}_RESOLUTION".format(band.upper()))
hdu.data = resolution[band].astype("f4")
hdulist.append(hdu)
hdulist.writeto(truth_filename, overwrite=True)
hdulist.close()
if args.zbest :
log.info("Read fibermap")
fibermap = read_fibermap(ofilename)
log.info("Writing a zbest file {}".format(zbest_filename))
columns = [
('CHI2', 'f8'),
('COEFF', 'f8' , (4,)),
('Z', 'f8'),
('ZERR', 'f8'),
('ZWARN', 'i8'),
('SPECTYPE', (str,96)),
('SUBTYPE', (str,16)),
('TARGETID', 'i8'),
('DELTACHI2', 'f8'),
('BRICKNAME', (str,8))]
zbest = Table(np.zeros(nqso, dtype=columns))
zbest['CHI2'][:] = 0.
zbest['Z'][:] = metadata['Z']
zbest['ZERR'][:] = 0.
zbest['ZWARN'][:] = 0
zbest['SPECTYPE'][:] = 'QSO'
zbest['SUBTYPE'][:] = ''
zbest['TARGETID'][:] = metadata['MOCKID']
zbest['DELTACHI2'][:] = 25.
hzbest = pyfits.convenience.table_to_hdu(zbest); hzbest.name='ZBEST'
hfmap = pyfits.convenience.table_to_hdu(fibermap); hfmap.name='FIBERMAP'
hdulist =pyfits.HDUList([pyfits.PrimaryHDU(),hzbest,hfmap])
hdulist.writeto(zbest_filename, overwrite=True)
hdulist.close() # see if this helps with memory issue
def _func(arg) :
""" Used for multiprocessing.Pool """
return simulate_one_healpix(**arg)
def main(args=None):
log = get_logger()
if isinstance(args, (list, tuple, type(None))):
args = parse(args)
if args.outfile is not None and len(args.infile)>1 :
log.error("Cannot specify single output file with multiple inputs, use --outdir option instead")
return 1
if not os.path.isdir(args.outdir) :
log.info("Creating dir {}".format(args.outdir))
os.makedirs(args.outdir)
if args.mags :
log.warning('--mags is deprecated; please use --bbflux instead')
args.bbflux = True
exptime = args.exptime
if exptime is None :
exptime = 1000. # sec
if args.eboss:
exptime = 1000. # sec (added here in case we change the default)
#- Generate obsconditions with args.program, then override as needed
obsconditions = reference_conditions[args.program.upper()]
if args.airmass is not None:
obsconditions['AIRMASS'] = args.airmass
if args.seeing is not None:
obsconditions['SEEING'] = args.seeing
if exptime is not None:
obsconditions['EXPTIME'] = exptime
if args.moonfrac is not None:
obsconditions['MOONFRAC'] = args.moonfrac
if args.moonalt is not None:
obsconditions['MOONALT'] = args.moonalt
if args.moonsep is not None:
obsconditions['MOONSEP'] = args.moonsep
if args.no_simqso:
log.info("Load QSO model")
model=QSO()
else:
log.info("Load SIMQSO model")
#lya_simqso_model.py is located in $DESISIM/py/desisim/scripts/.
        #Uses a different emission lines model than the default SIMQSO.
        #We will update this soon to match the one used in select_mock_targets.
model=SIMQSO(nproc=1,sqmodel='lya_simqso_model')
decam_and_wise_filters = None
bassmzls_and_wise_filters = None
if args.target_selection or args.bbflux :
log.info("Load DeCAM and WISE filters for target selection sim.")
# ToDo @moustakas -- load north/south filters
decam_and_wise_filters = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z',
'wise2010-W1', 'wise2010-W2')
bassmzls_and_wise_filters = filters.load_filters('BASS-g', 'BASS-r', 'MzLS-z',
'wise2010-W1', 'wise2010-W2')
footprint_healpix_weight = None
footprint_healpix_nside = None
if args.desi_footprint :
        if 'DESIMODEL' not in os.environ:
log.error("To apply DESI footprint, I need the DESIMODEL variable to find the file $DESIMODEL/data/footprint/desi-healpix-weights.fits")
sys.exit(1)
footprint_filename=os.path.join(os.environ['DESIMODEL'],'data','footprint','desi-healpix-weights.fits')
if not os.path.isfile(footprint_filename):
log.error("Cannot find $DESIMODEL/data/footprint/desi-healpix-weights.fits")
sys.exit(1)
pixmap=pyfits.open(footprint_filename)[0].data
        footprint_healpix_nside=256 # same resolution as original map so we don't lose anything
footprint_healpix_weight = load_pixweight(footprint_healpix_nside, pixmap=pixmap)
if args.gamma_kms_zfit and not args.zbest:
log.info("Setting --zbest to true as required by --gamma_kms_zfit")
args.zbest = True
if args.extinction:
sfdmap= SFDMap()
else:
sfdmap=None
if args.balprob:
bal=BAL()
else:
bal=None
if args.eboss:
eboss = { 'footprint':FootprintEBOSS(), 'redshift':RedshiftDistributionEBOSS() }
else:
eboss = None
if args.nproc > 1:
func_args = [ {"ifilename":filename , \
"args":args, "model":model , \
"obsconditions":obsconditions , \
"decam_and_wise_filters": decam_and_wise_filters , \
"bassmzls_and_wise_filters": bassmzls_and_wise_filters , \
"footprint_healpix_weight": footprint_healpix_weight , \
"footprint_healpix_nside": footprint_healpix_nside , \
"bal":bal,"sfdmap":sfdmap,"eboss":eboss \
} for i,filename in enumerate(args.infile) ]
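        # Pool.map forwards a single positional argument per task, so all
        # keyword arguments are packed into one dict and unpacked by _func.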
pool = multiprocessing.Pool(args.nproc)
pool.map(_func, func_args)
else:
for i,ifilename in enumerate(args.infile) :
simulate_one_healpix(ifilename,args,model,obsconditions,
decam_and_wise_filters,bassmzls_and_wise_filters,
footprint_healpix_weight,footprint_healpix_nside,
bal=bal,sfdmap=sfdmap,eboss=eboss)
| {
"repo_name": "desihub/desisim",
"path": "py/desisim/scripts/quickquasars.py",
"copies": "1",
"size": "41663",
"license": "bsd-3-clause",
"hash": -7205175175780028000,
"line_mean": 43.4168443497,
"line_max": 179,
"alpha_frac": 0.6241509253,
"autogenerated": false,
"ratio": 3.369157366973961,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.942633400647868,
"avg_score": 0.013394857159056034,
"num_lines": 938
} |
from __future__ import absolute_import, division, print_function
import tempfile
from contextlib import contextmanager
import os
import zlib
from mock import MagicMock
from ... import core
from ...core.application_base import Application
@contextmanager
def make_file(contents, suffix, decompress=False):
"""Context manager to write data to a temporary file,
and delete on exit
    :param contents: Data to write (bytes)
    :param suffix: File suffix (string)
    :param decompress: If True, zlib-decompress `contents` before writing
"""
if decompress:
contents = zlib.decompress(contents)
try:
_, fname = tempfile.mkstemp(suffix=suffix)
with open(fname, 'wb') as outfile:
outfile.write(contents)
yield fname
finally:
try:
os.unlink(fname)
except WindowsError: # on Windows the unlink can fail
pass
@contextmanager
def simple_catalog():
"""Context manager to create a temporary data file
"""
with make_file(b'#a, b\n1, 2\n3, 4', '.csv') as result:
yield result
def simple_session():
collect = core.data_collection.DataCollection()
hub = core.hub.Hub()
result = core.Session(data_collection=collect, hub=hub,
application=MagicMock(Application),
command_stack=core.CommandStack())
result.command_stack.session = result
return result
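# ---------------------------------------------------------------------------
# Illustrative usage (a sketch added for clarity; not part of the original
# module). `make_file` yields the path of a temporary file holding `contents`
# and removes it on exit; `simple_session` returns a Session wired to a mocked
# Application so command-stack behaviour can be exercised without a GUI.
# Runnable as `python -m glue.core.tests.util` if glue is installed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    with make_file(b'hello', '.txt') as fname:
        with open(fname, 'rb') as f:
            assert f.read() == b'hello'
    assert not os.path.exists(fname)
    session = simple_session()
    assert session.command_stack.session is session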
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/tests/util.py",
"copies": "1",
"size": "1409",
"license": "bsd-3-clause",
"hash": 2740180884512373000,
"line_mean": 25.0925925926,
"line_max": 64,
"alpha_frac": 0.6501064585,
"autogenerated": false,
"ratio": 4.375776397515528,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5525882856015528,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tempfile
from contextlib import contextmanager
import shutil
from into.backends.csv import CSV
from into.directory import Directory, discover, resource, _Directory
from into import into
from datashape import dshape
import os
@contextmanager
def csvs(n=3):
path = tempfile.mktemp()
os.mkdir(path)
fns = [os.path.join(path, 'file_%d.csv' % i) for i in range(n)]
for i, fn in enumerate(fns):
into(fn, [{'a': i, 'b': j} for j in range(5)])
try:
yield path + os.path.sep
finally:
shutil.rmtree(path)
def test_discover():
with csvs() as path:
d = Directory(CSV)(path)
assert discover(d) == dshape('var * {a: int64, b: int64}')
def test_resource_directory():
with csvs() as path:
r = resource(path)
assert type(r) == Directory(CSV)
assert r.path.rstrip(os.path.sep) == path.rstrip(os.path.sep)
r2 = resource(os.path.join(path, '*.csv'))
        assert type(r2) == Directory(CSV)
assert r2.path.rstrip(os.path.sep) == path.rstrip(os.path.sep)
def test_resource_nonexistent_directory():
assert isinstance(resource('/a/directory/that/doesnt/exist/'), _Directory)
| {
"repo_name": "mrocklin/into",
"path": "into/tests/test_directory.py",
"copies": "1",
"size": "1240",
"license": "bsd-3-clause",
"hash": -3953611761775073000,
"line_mean": 25.9565217391,
"line_max": 78,
"alpha_frac": 0.6467741935,
"autogenerated": false,
"ratio": 3.387978142076503,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4534752335576503,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tempfile
from contextlib import contextmanager
import shutil
from odo.backends.csv import CSV
from odo.directory import Directory, discover, resource, _Directory
from odo import into
from datashape import dshape
import os
@contextmanager
def csvs(n=3):
path = tempfile.mktemp()
os.mkdir(path)
fns = [os.path.join(path, 'file_%d.csv' % i) for i in range(n)]
for i, fn in enumerate(fns):
into(fn, [{'a': i, 'b': j} for j in range(5)])
try:
yield path + os.path.sep
finally:
shutil.rmtree(path)
def test_discover():
with csvs() as path:
d = Directory(CSV)(path)
assert discover(d) == dshape('var * {a: int64, b: int64}')
def test_resource_directory():
with csvs() as path:
r = resource(path)
assert type(r) == Directory(CSV)
assert r.path.rstrip(os.path.sep) == path.rstrip(os.path.sep)
r2 = resource(os.path.join(path, '*.csv'))
assert type(r2) == Directory(CSV)
assert r2.path.rstrip(os.path.sep) == path.rstrip(os.path.sep)
def test_resource_nonexistent_directory():
assert isinstance(resource(os.path.join('a', 'nonexistent', 'directory') +
os.path.sep),
_Directory)
| {
"repo_name": "alexmojaki/odo",
"path": "odo/tests/test_directory.py",
"copies": "9",
"size": "1317",
"license": "bsd-3-clause",
"hash": -4346740705513658000,
"line_mean": 26.4375,
"line_max": 78,
"alpha_frac": 0.6188306758,
"autogenerated": false,
"ratio": 3.4386422976501305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8557472973450131,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tempfile
import os
import inspect
from itertools import islice
from contextlib import contextmanager
from collections import Iterator
import psutil
import numpy as np
# Imports that replace older utils.
from cytoolz import count, unique, partition_all, nth, groupby, reduceby
from blaze.compatibility import map, zip
def nth_list(n, seq):
"""
>>> tuple(nth_list([0, 1, 4], 'Hello'))
('H', 'e', 'o')
>>> tuple(nth_list([4, 1, 0], 'Hello'))
('o', 'e', 'H')
>>> tuple(nth_list([0, 0, 0], 'Hello'))
('H', 'H', 'H')
"""
seq = iter(seq)
sn = sorted(n)
result = []
old = 0
item = next(seq)
for index in sorted(n):
for i in range(index - old):
item = next(seq)
result.append(item)
old = index
order = [x[1] for x in sorted(zip(n, range(len(n))))]
return (result[i] for i in order)
def get(ind, coll, lazy=False):
"""
>>> get(0, 'Hello')
'H'
>>> get([1, 0], 'Hello')
('e', 'H')
>>> get(slice(1, 4), 'Hello')
('e', 'l', 'l')
>>> get(slice(1, 4), 'Hello', lazy=True) # doctest: +SKIP
<itertools.islice object at 0x25ac470>
"""
if isinstance(ind, list):
result = nth_list(ind, coll)
elif isinstance(ind, slice):
result = islice(coll, ind.start, ind.stop, ind.step)
else:
if isinstance(coll, Iterator):
result = nth(ind, coll)
else:
result = coll[ind]
    if not lazy and isinstance(result, Iterator):
result = tuple(result)
return result
def ndget(ind, data):
"""
    Get from an N-dimensional indexable
    Can index with elements, lists, or slices. Mimics numpy fancy indexing on
    generic indexables.
>>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
>>> ndget(0, data)
[[1, 2], [3, 4]]
>>> ndget((0, 1), data)
[3, 4]
>>> ndget((0, 0, 0), data)
1
>>> ndget((slice(0, 2), [0, 1], 0), data)
((1, 3), (5, 7))
"""
if isinstance(ind, tuple) and len(ind) == 1:
ind = ind[0]
if not isinstance(ind, tuple):
return get(ind, data)
result = get(ind[0], data)
if isinstance(ind[0], (list, slice)):
return type(result)(ndget(ind[1:], row) for row in result)
else:
return ndget(ind[1:], result)
@contextmanager
def filetext(text, extension='', open=open, mode='wt'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
@contextmanager
def filetexts(d, open=open):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
"""
for filename, text in d.items():
f = open(filename, 'wt')
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
os.remove(filename)
@contextmanager
def tmpfile(extension=''):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension)
yield filename
try:
if os.path.exists(filename):
os.remove(filename)
except OSError: # Sometimes Windows can't close files
if os.name == 'nt':
os.close(handle)
try:
os.remove(filename)
except OSError: # finally give up
pass
def raises(err, lamda):
try:
lamda()
return False
except err:
return True
def keywords(func):
""" Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
"""
if isinstance(func, type):
return keywords(func.__init__)
return inspect.getargspec(func).args
def assert_allclose(lhs, rhs):
for tb in map(zip, lhs, rhs):
for left, right in tb:
if isinstance(left, (np.floating, float)):
# account for nans
assert np.all(np.isclose(left, right, equal_nan=True))
else:
assert left == right
def example(filename, datapath=os.path.join('examples', 'data')):
import blaze
return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)
def available_memory():
return psutil.virtual_memory().available
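# ---------------------------------------------------------------------------
# Illustrative usage (a sketch added for clarity; not part of the original
# module). `filetext` yields a temporary path whose contents are the given
# text and removes the file afterwards; `filetexts` materialises a whole
# {filename: text} mapping in the working directory and cleans it up on exit.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    with filetext('a,b\n1,2\n', extension='csv') as fn:
        with open(fn) as f:
            assert f.read() == 'a,b\n1,2\n'
    assert not os.path.exists(fn)
    with filetexts({'_demo_a.csv': '1,1\n', '_demo_b.csv': '2,2\n'}) as names:
        assert sorted(names) == ['_demo_a.csv', '_demo_b.csv']
    assert not os.path.exists('_demo_a.csv')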
| {
"repo_name": "vitan/blaze",
"path": "blaze/utils.py",
"copies": "1",
"size": "4633",
"license": "bsd-3-clause",
"hash": 475092478554972860,
"line_mean": 22.398989899,
"line_max": 79,
"alpha_frac": 0.5458666091,
"autogenerated": false,
"ratio": 3.5776061776061776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.962251986122495,
"avg_score": 0.0001905850962454736,
"num_lines": 198
} |
from __future__ import absolute_import, division, print_function
import tempfile
import pytest
import stripe
TEST_RESOURCE_ID = "file_123"
class TestFile(object):
@pytest.fixture(scope="function")
def setup_upload_api_base(self):
stripe.upload_api_base = stripe.api_base
stripe.api_base = None
yield
stripe.api_base = stripe.upload_api_base
stripe.upload_api_base = "https://files.stripe.com"
def test_is_listable(self, request_mock):
resources = stripe.File.list()
request_mock.assert_requested("get", "/v1/files")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.File)
def test_is_retrievable(self, request_mock):
resource = stripe.File.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested("get", "/v1/files/%s" % TEST_RESOURCE_ID)
assert isinstance(resource, stripe.File)
def test_is_creatable(self, setup_upload_api_base, request_mock):
stripe.multipart_data_generator.MultipartDataGenerator._initialize_boundary = (
lambda self: 1234567890
)
test_file = tempfile.TemporaryFile()
resource = stripe.File.create(
purpose="dispute_evidence",
file=test_file,
file_link_data={"create": True},
)
request_mock.assert_api_base(stripe.upload_api_base)
request_mock.assert_requested(
"post",
"/v1/files",
headers={
"Content-Type": "multipart/form-data; boundary=1234567890"
},
)
assert isinstance(resource, stripe.File)
def test_create_respects_stripe_version(
self, setup_upload_api_base, request_mock
):
test_file = tempfile.TemporaryFile()
stripe.File.create(
purpose="dispute_evidence", file=test_file, stripe_version="foo"
)
request_mock.assert_api_version("foo")
# You can use api_version instead of stripe_version
# in File.create. We preserve it for backwards compatibility
def test_create_respects_api_version(
self, setup_upload_api_base, request_mock
):
test_file = tempfile.TemporaryFile()
stripe.File.create(
purpose="dispute_evidence", file=test_file, api_version="foo"
)
request_mock.assert_api_version("foo")
def test_deserializes_from_file(self):
obj = stripe.util.convert_to_stripe_object({"object": "file"})
assert isinstance(obj, stripe.File)
def test_deserializes_from_file_upload(self):
obj = stripe.util.convert_to_stripe_object({"object": "file_upload"})
assert isinstance(obj, stripe.File)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_file.py",
"copies": "1",
"size": "2730",
"license": "mit",
"hash": -8570991886434301000,
"line_mean": 33.5569620253,
"line_max": 87,
"alpha_frac": 0.6340659341,
"autogenerated": false,
"ratio": 3.7295081967213113,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4863574130821311,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tempfile
import pytest
import stripe
TEST_RESOURCE_ID = "file_123"
class TestFileUpload(object):
@pytest.fixture(scope="function")
def setup_upload_api_base(self):
stripe.upload_api_base = stripe.api_base
stripe.api_base = None
yield
stripe.api_base = stripe.upload_api_base
stripe.upload_api_base = "https://files.stripe.com"
def test_is_listable(self, request_mock):
resources = stripe.FileUpload.list()
request_mock.assert_requested("get", "/v1/files")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], stripe.FileUpload)
def test_is_retrievable(self, request_mock):
resource = stripe.FileUpload.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested("get", "/v1/files/%s" % TEST_RESOURCE_ID)
assert isinstance(resource, stripe.FileUpload)
def test_is_creatable(self, setup_upload_api_base, request_mock):
stripe.multipart_data_generator.MultipartDataGenerator._initialize_boundary = (
lambda self: 1234567890
)
test_file = tempfile.TemporaryFile()
resource = stripe.FileUpload.create(
purpose="dispute_evidence",
file=test_file,
file_link_data={"create": True},
)
request_mock.assert_api_base(stripe.upload_api_base)
request_mock.assert_requested(
"post",
"/v1/files",
headers={
"Content-Type": "multipart/form-data; boundary=1234567890"
},
)
assert isinstance(resource, stripe.FileUpload)
def test_deserializes_from_file(self):
obj = stripe.util.convert_to_stripe_object({"object": "file"})
assert isinstance(obj, stripe.FileUpload)
def test_deserializes_from_file_upload(self):
obj = stripe.util.convert_to_stripe_object({"object": "file_upload"})
assert isinstance(obj, stripe.FileUpload)
| {
"repo_name": "stripe/stripe-python",
"path": "tests/api_resources/test_file_upload.py",
"copies": "1",
"size": "2049",
"license": "mit",
"hash": 4166619915415538000,
"line_mean": 33.7288135593,
"line_max": 87,
"alpha_frac": 0.6427525622,
"autogenerated": false,
"ratio": 3.7458866544789764,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48886392166789766,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from scipy.signal import lfilter
def slice_2d(x, inds0, inds1):
inds0 = tf.cast(inds0, tf.int64)
inds1 = tf.cast(inds1, tf.int64)
shape = tf.cast(tf.shape(x), tf.int64)
ncols = shape[1]
x_flat = tf.reshape(x, [-1])
return tf.gather(x_flat, inds0 * ncols + inds1)
def get_vars_from_scope(scope, graph=None):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
def first_in_collection(name):
return tf.get_collection(name)[0]
def kl_divergence(old_probs, new_probs):
    with tf.op_scope([old_probs, new_probs], 'kl_divergence'):
        # sums old * (log(new) - log(old)) over the last axis; note this is
        # the negative of KL(old || new) under the usual sign convention
        return tf.reduce_sum(old_probs *
                             (tf.log(new_probs) - tf.log(old_probs)), -1)
def likelihood_ratio(x, old_probs, new_probs):
"""
`x` is a one-hot 2D Tensor
"""
with tf.op_scope([x, old_probs, new_probs], 'likelihood_ratio'):
return tf.reduce_sum(x * old_probs, 1) / tf.reduce_sum(x * new_probs,
1)
def entropy(probs):
with tf.op_scope([probs], 'entropy'):
return -tf.reduce_sum(probs * tf.log(probs), 1)
def discount(x, gamma):
return lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
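# ---------------------------------------------------------------------------
# Illustrative check (a sketch added for clarity; not part of the original
# module). `discount` computes y[t] = x[t] + gamma * y[t+1], the discounted
# cumulative sum used for returns, by running an IIR filter over the reversed
# sequence. The explicit backward loop below is the reference definition and
# should agree with it to floating-point precision.
# ---------------------------------------------------------------------------
def _naive_discount(x, gamma):
    import numpy as np
    out = np.zeros(len(x))
    running = 0.0
    for t in reversed(range(len(x))):
        running = x[t] + gamma * running
        out[t] = running
    return out
if __name__ == '__main__':
    import numpy as np
    rewards = np.array([1.0, 0.0, 0.0, 1.0])
    assert np.allclose(discount(rewards, 0.99), _naive_discount(rewards, 0.99))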
| {
"repo_name": "domluna/deep_rl",
"path": "deep_rl/misc/utils.py",
"copies": "1",
"size": "1302",
"license": "mit",
"hash": -4014078567360275500,
"line_mean": 27.9333333333,
"line_max": 77,
"alpha_frac": 0.5983102919,
"autogenerated": false,
"ratio": 3.0208816705336425,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41191919624336426,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from tensorflow.python.ops.nn import dropout as drop
from util.cnn import fc_layer as fc, fc_relu_layer as fc_relu
# The network that takes in the hidden states of the question encoder LSTM
# and outputs one score per answer choice (a prior over the candidate answers).
def question_prior_net(encoder_states, num_choices, qpn_dropout, hidden_dim=500,
scope='question_prior_net', reuse=None):
with tf.variable_scope(scope, reuse=reuse):
        # concatenate the LSTM states from all layers
assert(isinstance(encoder_states, tuple))
h_list = []
for s in encoder_states:
assert(isinstance(s, tf.contrib.rnn.LSTMStateTuple))
h_list.append(s.h)
# h_concat has shape [N, D_lstm1 + ... + D_lstm_n]
h_concat = tf.concat(h_list, axis=1)
if qpn_dropout:
h_concat = drop(h_concat, 0.5)
fc1 = fc_relu('fc1', h_concat, output_dim=hidden_dim)
if qpn_dropout:
fc1 = drop(fc1, 0.5)
fc2 = fc('fc2', fc1, output_dim=num_choices)
return fc2
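# ---------------------------------------------------------------------------
# Hypothetical smoke test (a sketch added for clarity; not part of the
# original module). Assumes TensorFlow 1.x (tf.contrib) and that util.cnn is
# importable. It builds fake two-layer LSTM states and checks that the prior
# network returns one score per answer choice.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    N, D, num_choices = 8, 128, 3000
    fake_states = tuple(
        tf.contrib.rnn.LSTMStateTuple(c=tf.zeros([N, D]), h=tf.zeros([N, D]))
        for _ in range(2))
    scores = question_prior_net(fake_states, num_choices, qpn_dropout=False)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        assert sess.run(scores).shape == (N, num_choices)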
| {
"repo_name": "ronghanghu/n2nmn",
"path": "models_vqa/question_prior_net.py",
"copies": "1",
"size": "1094",
"license": "bsd-2-clause",
"hash": -5006335462792846000,
"line_mean": 38.0714285714,
"line_max": 80,
"alpha_frac": 0.6416819013,
"autogenerated": false,
"ratio": 3.315151515151515,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9444487737439169,
"avg_score": 0.002469135802469136,
"num_lines": 28
} |
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import conv_layer as conv
def empty_safe_1x1_conv(name, bottom, output_dim, reuse=None):
# TensorFlow Fold can generate zero-size batch for conv layer
# which will crash cuDNN on backward pass. So use this
# for 1x1 convolution in modules to avoid the crash.
bottom_shape = tf.shape(bottom)
N = bottom_shape[0]
H = bottom_shape[1]
W = bottom_shape[2]
input_dim = bottom.get_shape().as_list()[-1]
bottom_flat = tf.reshape(bottom, [-1, input_dim])
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
weights_initializer = tf.contrib.layers.xavier_initializer()
biases_initializer = tf.constant_initializer(0.)
weights = tf.get_variable('weights', [input_dim, output_dim],
initializer=weights_initializer)
biases = tf.get_variable('biases', output_dim,
initializer=biases_initializer)
conv_flat = tf.nn.xw_plus_b(bottom_flat, weights, biases)
conv = tf.reshape(conv_flat, to_T([N, H, W, output_dim]))
return conv
# TensorFlow Fold can generate zero-size batch for conv layer
# which will crash cuDNN on backward pass. So use this
# for arbitrary convolution in modules to avoid the crash.
def empty_safe_conv(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None,
biases_initializer=None, reuse=None):
g = tf.get_default_graph()
with g.gradient_override_map({'Conv2D': 'Conv2D_handle_empty_batch'}):
return conv(name, bottom, kernel_size, stride, output_dim,
padding, bias_term, weights_initializer,
biases_initializer, reuse=reuse)
@tf.RegisterGradient('Conv2D_handle_empty_batch')
def _Conv2DGrad(op, grad):
with tf.device('/cpu:0'):
return [tf.nn.conv2d_backprop_input(
tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),
op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'),
op.get_attr('data_format')),
tf.nn.conv2d_backprop_filter(op.inputs[0],
tf.shape(op.inputs[1]), grad,
op.get_attr('strides'),
op.get_attr('padding'),
op.get_attr('use_cudnn_on_gpu'),
op.get_attr('data_format'))]
# @tf.RegisterGradient('Conv2D_handle_empty_batch')
# def _Conv2DGrad(op, grad):
# def _input_nonempty():
# return tf.nn.conv2d_backprop_input(
# tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),
# op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'),
# op.get_attr('data_format'))
# def _filter_nonempty():
# return tf.nn.conv2d_backprop_filter(op.inputs[0],
# tf.shape(op.inputs[1]), grad,
# op.get_attr('strides'),
# op.get_attr('padding'),
# op.get_attr('use_cudnn_on_gpu'),
# op.get_attr('data_format'))
# def _input_empty():
# return tf.zeros_like(op.inputs[0])
# def _filter_empty():
# return tf.zeros_like(op.inputs[1])
# is_nonempty = tf.greater(tf.size(op.inputs[0]), 0)
# return [tf.cond(is_nonempty, _input_nonempty, _input_empty),
# tf.cond(is_nonempty, _filter_nonempty, _filter_empty)]
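# ---------------------------------------------------------------------------
# Illustrative check (a sketch added for clarity; not part of the original
# module). empty_safe_1x1_conv works because a 1x1 convolution is just an
# independent matrix multiply applied at every spatial position; the NumPy
# snippet below spells out that equivalence without touching TensorFlow.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    N, H, W, C_in, C_out = 2, 4, 5, 3, 7
    x = np.random.randn(N, H, W, C_in)
    w = np.random.randn(C_in, C_out)
    b = np.random.randn(C_out)
    # flatten -> x.w + b -> reshape, mirroring empty_safe_1x1_conv
    via_matmul = (x.reshape(-1, C_in).dot(w) + b).reshape(N, H, W, C_out)
    # the same per-pixel matmul written explicitly with einsum
    via_einsum = np.einsum('nhwc,cd->nhwd', x, w) + b
    assert np.allclose(via_matmul, via_einsum)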
| {
"repo_name": "ronghanghu/n2nmn",
"path": "util/empty_safe_conv.py",
"copies": "1",
"size": "3817",
"license": "bsd-2-clause",
"hash": -7916809600594072000,
"line_mean": 47.3164556962,
"line_max": 83,
"alpha_frac": 0.5679853288,
"autogenerated": false,
"ratio": 3.6352380952380954,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47032234240380955,
"avg_score": null,
"num_lines": null
} |