from __future__ import absolute_import, division, print_function
__metaclass__ = type
import errno
import six
import socket
import ssl
from abc import ABCMeta, abstractmethod
import logging
from threading import RLock
from ssl import SSLError
import datetime
import time
from . import compat
from .proto import Frame
from .concurrency import synchronized
from .exceptions import UnexpectedFrame
from .utils import get_errno
from .spec import FrameType
log = logging.getLogger('amqpy')
compat.patch()
_UNAVAIL = {errno.EAGAIN, errno.EINTR, errno.ENOENT}
AMQP_PROTOCOL_HEADER = b'AMQP\x00\x00\x09\x01' # bytes([65, 77, 81, 80, 0, 0, 9, 1])
class Transport:
"""Common superclass for TCP and SSL transports"""
__metaclass__ = ABCMeta
connected = False
def __init__(self, host, port, connect_timeout, buf_size):
"""
:param host: hostname or IP address
:param port: port
:param connect_timeout: connect timeout
:type host: str
:type port: int
:type connect_timeout: float or None
"""
self._rbuf = bytearray(buf_size)
#: :type: datetime.datetime
self.last_heartbeat_sent = None
#: :type: datetime.datetime
self.last_heartbeat_received = None
self.last_heartbeat_sent_monotonic = 0.0
# the purpose of the frame lock is to allow no more than one thread to read/write a frame
# to the connection at any time
self._frame_write_lock = RLock()
self._frame_read_lock = RLock()
self.sock = None
# try to connect
last_err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.settimeout(connect_timeout)
self.sock.connect(sa)
break
except socket.error as exc:
self.sock.close()
self.sock = None
last_err = exc
if not self.sock:
# didn't connect; raise the most recent error
raise socket.error(last_err)
try:
assert isinstance(self.sock, socket.socket)
self.sock.settimeout(None)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self._setup_transport()
self.write(AMQP_PROTOCOL_HEADER)
except (OSError, IOError, socket.error) as exc:
if get_errno(exc) not in _UNAVAIL:
self.connected = False
raise
self.connected = True
def __del__(self):
try:
# socket module may have been collected by gc if this is called by a thread at shutdown
if socket is not None:
# noinspection PyBroadException
try:
self.close()
except:
pass
finally:
self.sock = None
def _read(self, n, initial, _errnos):
"""Read from socket
This is the default implementation. Subclasses may implement `read()` to simply call this
method, or provide their own `read()` implementation.
Note: According to SSL_read(3), it can return at most 16kB of data. Thus, we use an internal
read buffer to accumulate the exact number of bytes wanted.
Note: ssl.sock.read may cause ENOENT if the operation couldn't be performed (?).
:param int n: exact number of bytes to read
:return: data read
:rtype: memoryview
"""
mview = memoryview(self._rbuf)
to_read = n
while to_read:
try:
bytes_read = self.sock.recv_into(mview, to_read)
mview = mview[bytes_read:]
to_read -= bytes_read
except socket.error as exc:
if not initial and exc.errno in _errnos:
continue
raise
if not bytes_read:
raise IOError('socket closed')
return memoryview(self._rbuf)[:n]
@abstractmethod
def read(self, n, initial=False):
"""Read exactly `n` bytes from the peer
:param n: number of bytes to read
:type n: int
:return: data read
:rtype: bytes
"""
pass
@abstractmethod
def write(self, s):
"""Completely write a string to the peer
"""
def _setup_transport(self):
"""Do any additional initialization of the class (used by the subclasses)
"""
pass
def close(self):
if self.sock is not None:
# call shutdown first to make sure that pending messages reach the AMQP broker if the
# program exits after calling this method
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.sock = None
self.connected = False
@synchronized('_frame_read_lock')
def read_frame(self):
"""Read frame from connection
Note that the frame may be destined for any channel. It is permitted to interleave frames
from different channels.
:return: frame
:rtype: amqpy.proto.Frame
"""
frame = Frame()
try:
# read frame header: 7 bytes
frame_header = self.read(7, True)
frame.data.extend(frame_header)
# read frame payload
payload = self.read(frame.payload_size)
frame.data.extend(payload)
# read frame terminator byte
frame_terminator = self.read(1)
frame.data.extend(frame_terminator)
if six.PY2:
#: :type: int
i_last_byte = six.byte2int(frame_terminator)
else:
# this fixes the change in memoryview in Python 3.3 (accessing an element returns the
# correct type)
#: :type: int
i_last_byte = six.byte2int(bytes(frame_terminator))
except (OSError, IOError, socket.error) as exc:
# don't disconnect for ssl read time outs (Python 3.2):
# http://bugs.python.org/issue10272
if isinstance(exc, SSLError) and 'timed out' in str(exc):
raise socket.timeout()
if get_errno(exc) not in _UNAVAIL and not isinstance(exc, socket.timeout):
self.connected = False
raise
if i_last_byte == FrameType.END:
if frame.frame_type == FrameType.HEARTBEAT:
self.last_heartbeat_received = datetime.datetime.now()
return frame
else:
raise UnexpectedFrame('Received {} while expecting 0xce (FrameType.END)'.format(hex(i_last_byte)))
@synchronized('_frame_write_lock')
def write_frame(self, frame):
"""Write frame to connection
Note that the frame may be destined for any channel. It is permitted to interleave frames
from different channels.
:param frame: frame
:type frame: amqpy.proto.Frame
"""
try:
self.write(frame.data)
except socket.timeout:
raise
except (OSError, IOError, socket.error) as exc:
if get_errno(exc) not in _UNAVAIL:
self.connected = False
raise
def send_heartbeat(self):
"""Send a heartbeat to the server
"""
self.last_heartbeat_sent = datetime.datetime.now()
self.last_heartbeat_sent_monotonic = time.monotonic()
self.write_frame(Frame(FrameType.HEARTBEAT))
def is_alive(self):
"""Check if connection is alive
This method is the primary way to check if the connection is alive.
Side effects: This method may send a heartbeat as a last resort to check if the connection
is alive.
:return: True if connection is alive, else False
:rtype: bool
"""
if not self.sock:
# we don't have a valid socket, this connection is definitely not alive
return False
if not self.connected:
# the `transport` is not connected
return False
# recv with MSG_PEEK to check if the connection is alive
# note: if there is data still in the buffer, this will not tell us anything
# if hasattr(socket, 'MSG_PEEK') and not isinstance(self.sock, ssl.SSLSocket):
# prev = self.sock.gettimeout()
# self.sock.settimeout(0.0001)
# try:
# self.sock.recv(1, socket.MSG_PEEK)
# except socket.timeout:
# pass
# except socket.error:
# # the exception is usually (always?) a ConnectionResetError in Python 3.3+
# log.debug('socket.error, connection is closed')
# return False
# finally:
# self.sock.settimeout(prev)
# send a heartbeat to check if the connection is alive
try:
self.send_heartbeat()
except socket.error:
return False
return True
class SSLTransport(Transport):
"""Transport that works over SSL
"""
def __init__(self, host, port, connect_timeout, frame_max, ssl_opts):
self.ssl_opts = ssl_opts
super(SSLTransport, self).__init__(host, port, connect_timeout, frame_max)
def _setup_transport(self):
"""Wrap the socket in an SSL object
"""
self.sock = ssl.wrap_socket(self.sock, **self.ssl_opts)
def read(self, n, initial=False):
"""Read from socket
According to SSL_read(3), it can return at most 16kB of data. Thus, we use an internal read
buffer like `TCPTransport.read()` to get the exact number of bytes wanted.
:param int n: exact number of bytes to read
:return: data read
:rtype: bytes
"""
return self._read(n, initial, _errnos=(errno.ENOENT, errno.EAGAIN, errno.EINTR))
def write(self, s):
"""Write a string out to the SSL socket fully
"""
try:
write = self.sock.write
except AttributeError:
# works around a bug in python socket library
raise IOError('Socket closed')
else:
while s:
n = write(s)
if not n:
raise IOError('Socket closed')
s = s[n:]
class TCPTransport(Transport):
"""Transport that deals directly with TCP socket
"""
def read(self, n, initial=False):
"""Read exactly n bytes from the socket
:param int n: exact number of bytes to read
:return: data read
:rtype: bytes
"""
return self._read(n, initial, _errnos=(errno.EAGAIN, errno.EINTR))
def write(self, s):
self.sock.sendall(s)
def create_transport(host, port, connect_timeout, frame_max, ssl_opts=None):
"""Given a few parameters from the Connection constructor, select and create a subclass of
Transport
If `ssl_opts` is a dict, SSL will be used and `ssl_opts` will be passed to
:func:`ssl.wrap_socket()`. In all other cases, SSL will not be used.
:param host: host
:param connect_timeout: connect timeout
:param ssl_opts: ssl options passed to :func:`ssl.wrap_socket()`
:type host: str
:type connect_timeout: float or None
:type ssl_opts: dict or None
"""
if isinstance(ssl_opts, dict):
return SSLTransport(host, port, connect_timeout, frame_max, ssl_opts)
else:
return TCPTransport(host, port, connect_timeout, frame_max)
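# A minimal usage sketch of the factory above (host and port are illustrative
# and assume a reachable AMQP broker). After the protocol header is written by
# the constructor, the broker replies with a Connection.Start method frame.
if __name__ == '__main__':
    transport = create_transport('localhost', 5672, connect_timeout=5.0, frame_max=131072)
    frame = transport.read_frame()
    print('received frame type:', frame.frame_type)
    transport.close()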
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/transport.py",
"copies": "1",
"size": "11809",
"license": "mit",
"hash": -7205074165750022000,
"line_mean": 31.6215469613,
"line_max": 110,
"alpha_frac": 0.5833686172,
"autogenerated": false,
"ratio": 4.264716504153124,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5348085121353123,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import gc
import os
from signal import SIGHUP
import threading
import time
import signal
from amqpy.login import login_response_plain
import pytest
from .. import Channel, NotFound, FrameError, spec, Connection
from ..proto import Method
class TestConnection:
def test_create_channel(self, conn):
ch = conn.channel(1)
assert isinstance(ch, Channel)
assert ch.channel_id == 1
ch2 = conn.channel()
assert ch2.channel_id != 1
ch.close()
ch2.close()
def test_close(self, conn):
"""Make sure we've broken various references when closing channels and connections, to help
with GC
"""
# create a channel and make sure it's linked as we'd expect
ch = conn.channel()
assert 1 in conn.channels
assert ch.connection == conn
assert ch.is_open is True
# close the channel and make sure the references are broken that we expect
ch.close()
assert ch.connection is None
assert 1 not in conn.channels
assert ch.callbacks == {}
assert ch.is_open is False
# close the connection and make sure the references we expect are gone
conn.close()
assert conn.connection is None
assert conn.channels is not None
def test_open_close_open(self):
# create a new connection
conn = Connection()
# close the connection
conn.close()
# reopen the connection
conn.connect()
def test_is_alive(self, conn):
assert conn.is_alive()
def test_is_alive_after_close(self, conn):
conn.close()
assert conn.is_alive() is False
def test_is_alive_chan_exception(self, conn, ch, rand_queue):
"""Make sure connection is still alive after a channel exception
"""
with pytest.raises(NotFound):
ch.queue_declare(rand_queue, passive=True)
assert conn.is_alive()
def test_is_alive_conn_exception(self, conn, rand_queue):
"""Make sure is_alive() returns False after a connection exception
"""
ch = Channel(conn, 10)
with pytest.raises(NotFound):
ch.queue_declare(rand_queue, passive=True)
with pytest.raises(FrameError):
conn.method_writer.write_method(Method(spec.Queue.Declare, channel_id=10))
conn.wait()
assert conn.is_alive() is False
def test_gc_closed(self, conn):
"""Make sure we've broken various references when closing channels and connections, to help
with GC
"""
unreachable_before = len(gc.garbage)
# create a channel and make sure it's linked as we'd expect
conn.channel()
assert 1 in conn.channels
# close the connection and make sure the references we expect are gone.
conn.close()
gc.collect()
gc.collect()
gc.collect()
assert unreachable_before == len(gc.garbage)
def test_gc_forget(self, conn):
"""Make sure the connection gets gc'ed when there is no more references to it
"""
unreachable_before = len(gc.garbage)
ch = conn.channel()
assert 1 in conn.channels
del ch
gc.collect()
gc.collect()
gc.collect()
assert unreachable_before == len(gc.garbage)
@pytest.mark.skipif('sys.version_info >= (3, 5) or sys.version_info[0] == 2')
def test_interrupted(self, conn):
"""Make sure to get InterruptedError if a read was interrupted
"""
def sig_handler(sig, frame):
pass
signal.signal(SIGHUP, sig_handler)
def interrupt_it():
time.sleep(1)
os.kill(os.getpid(), signal.SIGHUP)
th = threading.Thread(target=interrupt_it)
th.start()
with pytest.raises(InterruptedError):
conn.drain_events(2)
class TestLogin:
def test_login_response_plain(self):
b = login_response_plain('blah', 'blah')
assert isinstance(b, bytes)
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/tests/test_connection.py",
"copies": "1",
"size": "4150",
"license": "mit",
"hash": -3588281565565669400,
"line_mean": 27.231292517,
"line_max": 99,
"alpha_frac": 0.6127710843,
"autogenerated": false,
"ratio": 4.1708542713567835,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5283625355656784,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import logging
import time
from functools import wraps
from . import compat
compat.patch() # monkey-patch time.perf_counter
log = logging.getLogger('amqpy')
def synchronized(lock_name):
"""Decorator for automatically acquiring and releasing lock for method call
This decorator accesses the `lock_name` :class:`threading.Lock` attribute of the instance that
the wrapped method is bound to. The lock is acquired (blocks indefinitely) before the method is
called. After the method has executed, the lock is released.
Decorated methods should not be long-running operations, since the lock is held for the duration
of the method's execution.
:param lock_name: name of :class:`threading.Lock` object
"""
def decorator(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
lock = getattr(self, lock_name)
acquired = lock.acquire(False)
if not acquired:
# log.debug('> Wait to acquire lock for [{}]'.format(f.__qualname__))
start_time = time.perf_counter()
lock.acquire()
tot_time = time.perf_counter() - start_time
if tot_time > 5:
# only log if we waited more than 5s to acquire the lock
log.warn('Acquired lock for [{}] in: {:.3f}s'.format(f.__qualname__, tot_time))
try:
retval = f(self, *args, **kwargs)
finally:
lock.release()
return retval
return wrapper
return decorator
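# A minimal usage sketch (illustrative class, not part of this module): the
# decorated method body runs with the named lock attribute held.
class _SynchronizedCounter(object):
    def __init__(self):
        import threading
        self._counter_lock = threading.RLock()
        self.value = 0

    @synchronized('_counter_lock')
    def increment(self):
        self.value += 1  # executed while _counter_lock is held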
def synchronized_connection():
"""Decorator for automatically acquiring and releasing a connection-level
lock for method call
"""
def decorator(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if hasattr(self, 'connection'):
lock = self.connection.conn_lock
elif hasattr(self, 'conn_lock'):
lock = self.conn_lock
else:
raise Exception('Unable to find `lock` attribute')
acquired = lock.acquire(False)
if not acquired:
# log.debug('> Wait to acquire lock for [{}]'.format(f.__qualname__))
start_time = time.perf_counter()
lock.acquire()
tot_time = time.perf_counter() - start_time
if tot_time > 5:
# only log if we waited more than 5s to acquire the lock
log.warn('Acquired lock for [{}] in: {:.3f}s'.format(f.__qualname__, tot_time))
try:
retval = f(self, *args, **kwargs)
finally:
lock.release()
return retval
return wrapper
return decorator
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/concurrency.py",
"copies": "1",
"size": "2841",
"license": "mit",
"hash": -8980599050588541000,
"line_mean": 32.8214285714,
"line_max": 100,
"alpha_frac": 0.5705737416,
"autogenerated": false,
"ratio": 4.397832817337461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5468406558937461,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import logging
from .. import Message, AbstractConsumer
from ..exceptions import Timeout
log = logging.getLogger('amqpy')
class Consumer(AbstractConsumer):
def run(self, msg):
log.info('{.consumer_tag} received message: {} {}'.format(self, msg.properties, msg.body))
msg.ack()
class TestConsumer:
def test_basic_consume(self, conn, ch, rand_exch, rand_queue):
self.consume_count = 0
def consumer(msg):
"""Consume message
"""
log.info('Received message: {}'.format(msg.body))
msg.ack()
self.consume_count += 1
q = rand_queue
rk = q
exch = rand_exch
log.info('Declare exchange')
ch.exchange_declare(exch, 'direct')
log.info('Declare queue')
ch.queue_declare(q)
log.info('Bind queue')
ch.queue_bind(q, exch, rk)
log.info('Enable publisher confirms')
ch.confirm_select()
log.info('Publish messages')
ch.basic_publish(Message('Hello, world!', content_type='text/plain'), exch, rk)
ch.basic_publish(Message('Hello, world!', content_type='text/plain'), exch, rk)
log.info('Declare consumer')
ch.basic_consume(q, callback=consumer)
log.info('Publish messages')
ch.basic_publish(Message('Hello, world!', content_type='text/plain'), exch, rk)
ch.basic_publish(Message('Hello, world!', content_type='text/plain'), exch, rk)
log.info('Begin draining events')
while True:
try:
conn.drain_events(0.1)
except Timeout:
break
assert self.consume_count == 4
def test_consumer(self, conn, ch, rand_exch, rand_queue):
q = rand_queue
rk = q
exch = rand_exch
log.info('Declare exchange')
ch.exchange_declare(exch, 'direct')
log.info('Declare queue')
ch.queue_declare(q)
log.info('Bind queue')
ch.queue_bind(q, exch, rk)
log.info('Enable publisher confirms')
ch.confirm_select()
log.info('Set QOS')
ch.basic_qos(prefetch_count=1, a_global=True)
log.info('Publish messages')
for i in range(10):
ch.basic_publish(Message('{}: Hello, world!'.format(i)), exch, rk)
log.info('Declare consumer')
c1 = Consumer(ch, q)
c1.declare()
log.info('Begin draining events')
while True:
try:
conn.drain_events(0.1)
except Timeout:
break
assert c1.consume_count == 10
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/tests/test_consumer.py",
"copies": "1",
"size": "2747",
"license": "mit",
"hash": -6659534803919385000,
"line_mean": 25.931372549,
"line_max": 98,
"alpha_frac": 0.5675282126,
"autogenerated": false,
"ratio": 3.7374149659863947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9802219867039554,
"avg_score": 0.0005446623093681917,
"num_lines": 102
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import zipfile
import tarfile
import os
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = False
result = super(ActionModule, self).run(tmp, task_vars)
src = self._task.args.get("src")
proj_path = self._task.args.get("project_path")
force = self._task.args.get("force", False)
try:
archive = zipfile.ZipFile(src)
get_filenames = archive.namelist
get_members = archive.infolist
except zipfile.BadZipFile:
archive = tarfile.open(src)
get_filenames = archive.getnames
get_members = archive.getmembers
except tarfile.ReadError:
result["failed"] = True
result["msg"] = "{0} is not a valid archive".format(src)
return result
# Most well formed archives contain a single root directory, typically named
# project-name-1.0.0. The project contents should be inside that directory.
start_index = 0
root_contents = set(
[filename.split(os.path.sep)[0] for filename in get_filenames()]
)
if len(root_contents) == 1:
start_index = len(list(root_contents)[0]) + 1
for member in get_members():
try:
filename = member.filename
except AttributeError:
filename = member.name
# Skip the archive base directory
if not filename[start_index:]:
continue
dest = os.path.join(proj_path, filename[start_index:])
if not force and os.path.exists(dest):
continue
try:
is_dir = member.is_dir()
except AttributeError:
is_dir = member.isdir()
if is_dir:
os.makedirs(dest, exist_ok=True)
else:
try:
member_f = archive.open(member)
except TypeError:
member_f = tarfile.ExFileObject(archive, member)
with open(dest, "wb") as f:
f.write(member_f.read())
member_f.close()
archive.close()
result["changed"] = True
return result
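# A standalone sketch (hypothetical file names) of the root-directory handling
# above: when all members share one leading path component, extraction starts
# past that prefix, so 'project-1.0.0/setup.py' lands at '<proj_path>/setup.py'.
if __name__ == '__main__':
    names = ['project-1.0.0/setup.py', 'project-1.0.0/src/app.py']
    roots = set(name.split(os.path.sep)[0] for name in names)
    start_index = len(list(roots)[0]) + 1 if len(roots) == 1 else 0
    print([name[start_index:] for name in names])  # ['setup.py', 'src/app.py']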
| {
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"path": "third_party/github.com/ansible/awx/awx/playbooks/action_plugins/project_archive.py",
"copies": "1",
"size": "2493",
"license": "apache-2.0",
"hash": 4669665804977077000,
"line_mean": 29.4024390244,
"line_max": 84,
"alpha_frac": 0.5547533093,
"autogenerated": false,
"ratio": 4.459749552772808,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5514502862072808,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
monotonic
~~~~~~~~~
This module provides a ``monotonic()`` function which returns the
value (in fractional seconds) of a clock which never goes backwards.
On Python 3.3 or newer, ``monotonic`` will be an alias of
``time.monotonic`` from the standard library. On older versions,
it will fall back to an equivalent implementation:
+-------------+--------------------+
| Linux, BSD | clock_gettime(3) |
+-------------+--------------------+
| Windows | GetTickCount64 |
+-------------+--------------------+
| OS X | mach_absolute_time |
+-------------+--------------------+
If no suitable implementation exists for the current platform,
attempting to import this module (or to import from it) will
cause a RuntimeError exception to be raised.
Copyright 2014 Ori Livneh <ori@wikimedia.org>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ctypes
import ctypes.util
import os
import platform
import re
import sys
import time
__all__ = ('monotonic',)
def get_os_release():
"""Get the leading numeric component of the OS release."""
return re.match(r'[\d.]+', platform.release()).group(0)
def compare_versions(v1, v2):
"""Compare two version strings.
Returns True if the former is (strictly) greater than the later,
False otherwise.
"""
def normalize(v):
return tuple(map(int, re.sub(r'(\.0+)*$', '', v).split('.')))
return normalize(v1) > normalize(v2)
try:
monotonic = time.monotonic
except AttributeError:
try:
if sys.platform == 'darwin': # OS X, iOS
# See Technical Q&A QA1398 of the Mac Developer Library:
# <https://developer.apple.com/library/mac/qa/qa1398/>
libc = ctypes.CDLL('libc.dylib', use_errno=True)
class mach_timebase_info_data_t(ctypes.Structure):
"""System timebase info. Defined in <mach/mach_time.h>."""
_fields_ = (('numer', ctypes.c_uint32),
('denom', ctypes.c_uint32))
mach_absolute_time = libc.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
timebase = mach_timebase_info_data_t()
libc.mach_timebase_info(ctypes.byref(timebase))
ticks_per_second = timebase.numer / timebase.denom * 1.0e9
def monotonic():
"""Monotonic clock, cannot go backward."""
return mach_absolute_time() / ticks_per_second
elif sys.platform.startswith('win32'):
# Windows Vista / Windows Server 2008 or newer.
GetTickCount64 = ctypes.windll.kernel32.GetTickCount64
GetTickCount64.restype = ctypes.c_ulonglong
def monotonic():
"""Monotonic clock, cannot go backward."""
return GetTickCount64() / 1000.0
else:
try:
clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
use_errno=True).clock_gettime
except AttributeError:
clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
use_errno=True).clock_gettime
class timespec(ctypes.Structure):
"""Time specification, as described in clock_gettime(3)."""
_fields_ = (('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long))
ts = timespec()
if sys.platform.startswith('linux'):
if compare_versions(get_os_release(), '2.6.28') > 0:
CLOCK_MONOTONIC = 4 # CLOCK_MONOTONIC_RAW
else:
CLOCK_MONOTONIC = 1
elif sys.platform.startswith('freebsd'):
CLOCK_MONOTONIC = 4
elif 'bsd' in sys.platform:
CLOCK_MONOTONIC = 3
def monotonic():
"""Monotonic clock, cannot go backward."""
if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno))
return ts.tv_sec + ts.tv_nsec / 1.0e9
# Perform a sanity-check.
if monotonic() - monotonic() > 0:
raise ValueError('monotonic() is not monotonic!')
except Exception as err:
raise RuntimeError('no suitable implementation for this system: %s'
% err)
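# A minimal usage sketch: differences between successive readings are always
# non-negative, even if the wall clock is adjusted backwards in between.
if __name__ == '__main__':
    start = monotonic()
    time.sleep(0.1)
    print('elapsed: %.3fs' % (monotonic() - start))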
| {
"repo_name": "veegee/amqpy",
"path": "amqpy/support/monotonic.py",
"copies": "1",
"size": "5089",
"license": "mit",
"hash": -3338388679205937000,
"line_mean": 32.9266666667,
"line_max": 75,
"alpha_frac": 0.5735900963,
"autogenerated": false,
"ratio": 4.181594083812654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5255184180112654,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# see https://wiki.python.org/moin/PortingToPy3k/BilingualQuickRef
from os.path import join, basename
from astwro.exttools import Runner
from astwro.config import find_opt_file
# TODO: support for configuration and conf files (common with Scamp)
class SexResults(object):
def __init__(self, dir, results_filename):
super(SexResults, self).__init__()
self.dir = dir
self.stars_file = results_filename
self._stars = None
@property
def stars(self):
if self._stars is None:
import astropy.io.fits as pyfits
self._stars = pyfits.getdata(join(self.dir, self.stars_file),2)
return self._stars
class Sextractor(Runner):
""" Emmanuel BERTIN `sextractor` runner
Object of this class maintains a single process of `sextractor` and its working directory.
Note
----
Very `alpha` stage, very limited
Parameters
----------
dir : str, TmpDir, optional
Working directory for sextractor process
Default - new TmpDir will be created and destroyed with Sextractor object
conf : str, optional
Path to sextractor's configuration file, Default: package default file
param : str, optional
Path to sextractor's parameters file (specified in conf), Default: package default file
conv : str, optional
Path to sextractor's convolution kernel file (specified in conf), Default: package default file
"""
def __init__(self, dir=None, conf=None, param=None, conv=None, output=None):
# base implementation of __init__ calls `_reset` also
super(Sextractor, self).__init__(dir=dir, batch=False)
self.conf_file = conf
self.param_file = param
self.conv_file = conv
self.output_file = output
self.SEXresults = None
self._update_executable('sextractor')
def _reset(self):
super(Sextractor, self)._reset()
def __deepcopy__(self, memo):
from copy import deepcopy
new = super(Sextractor, self).__deepcopy__(memo)
return new
def _pre_run(self, wait):
super(Sextractor, self)._pre_run(wait=wait)
def _init_workdir_files(self, dir):
super(Sextractor, self)._init_workdir_files(dir)
def __call__(self, image):
""" Run sextractor on image
Parameters
----------
image : str
Path of the FITS image
Returns
-------
SexResults object
"""
if self.conf_file is None:
conf_file = find_opt_file('sextractor.conf', package='sex')
out_file = 'output1.fits'
self._prepare_input_file(find_opt_file('sextractor.param', package='sex'), preservefilename=True)
self._prepare_input_file(find_opt_file('sextractor.conv', package='sex'), preservefilename=True)
else:
out_file = self.output_file
conf_file = self.conf_file
if self.param_file is not None:
self._prepare_input_file(self.param_file, preservefilename=True)
if self.conv_file is not None:
self._prepare_input_file(self.conv_file, preservefilename=True)
self._prepare_input_file(conf_file, preservefilename = True)
self._prepare_input_file(image, preservefilename=True)
self.SEXresults = SexResults(dir=self.dir.path, results_filename=out_file)
self.arguments = [basename(image), '-c', basename(conf_file)]
self.run()
return self.SEXresults
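# A minimal usage sketch, assuming the `sextractor` binary is installed and
# 'image.fits' (an illustrative path) exists; the catalogue is read back from
# the FITS file written into the runner's working directory.
if __name__ == '__main__':
    sex = Sextractor()
    results = sex('image.fits')
    print(results.stars)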
| {
"repo_name": "majkelx/astwro",
"path": "astwro/sex/Sextractor.py",
"copies": "1",
"size": "3625",
"license": "mit",
"hash": -47987725864221290,
"line_mean": 31.6576576577,
"line_max": 109,
"alpha_frac": 0.6284137931,
"autogenerated": false,
"ratio": 3.877005347593583,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5005419140693583,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# see https://wiki.python.org/moin/PortingToPy3k/BilingualQuickRef
from os.path import join, basename, splitext
from astwro.exttools import Runner
from astwro.config import find_opt_file
from .Sextractor import SexResults
class ScampResults(object):
def __init__(self, dir, results_filename):
super(ScampResults, self).__init__()
self.dir = dir
self.header_file = results_filename
self._header = None
@property
def fits_header(self):
if self._header is None:
import astropy.io.fits as pyfits
self._header = pyfits.Header.fromtextfile(self.header_file, endcard=True)
return self._header
class Scamp(Runner):
""" Emmanuel BERTIN `scamp` runner
Object of this class maintains a single process of `scamp` and its working directory.
Note
----
Very `alpha` stage, very limited
Parameters
----------
dir : str, TmpDir, optional
Working directory for scamp process
Default - new TmpDir will be created and destroyed with Scamp object
conf : str, optional
Path to scamp's configuration file, Default: package default file
"""
def __init__(self, dir=None, conf=None):
# base implementation of __init__ calls `_reset` also
super(Scamp, self).__init__(dir=dir, batch=False)
self.conf_file = conf
self.SCAMPresults = None
self._update_executable('scamp')
def _reset(self):
super(Scamp, self)._reset()
def __deepcopy__(self, memo):
from copy import deepcopy
new = super(Scamp, self).__deepcopy__(memo)
return new
def _pre_run(self, wait):
super(Scamp, self)._pre_run(wait=wait)
def _init_workdir_files(self, dir):
super(Scamp, self)._init_workdir_files(dir)
def __call__(self, src):
""" Run scamp on crc catalog
Parameters
----------
src : str or SexResults
Source catalog. Filename or SexResults
Returns
-------
ScampResults object
"""
if isinstance(src, SexResults):
src = join(src.dir, src.stars_file)
if self.conf_file is None:
conf_file = find_opt_file('scamp.conf', package='sex')
else:
conf_file = self.conf_file
out_file = splitext(basename(src))[0] # 'output1'
self._prepare_input_file(conf_file, preservefilename = True)
self._prepare_input_file(src, preservefilename=True)
self.SCAMPresults = ScampResults(dir=self.dir.path, results_filename=out_file)
self.arguments = [basename(src), '-c', basename(conf_file)]
self.run()
return self.SCAMPresults
| {
"repo_name": "majkelx/astwro",
"path": "astwro/sex/Scamp.py",
"copies": "1",
"size": "3074",
"license": "mit",
"hash": 8100846608023938000,
"line_mean": 28.2761904762,
"line_max": 97,
"alpha_frac": 0.6255692908,
"autogenerated": false,
"ratio": 3.915923566878981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5041492857678981,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""
A module for encoding utils
"""
import io
import re
import six
# From IPython.utils.openpy
try:
from tokenize import detect_encoding
except ImportError:
from codecs import lookup, BOM_UTF8
# things we rely on and need to put it in cache early, to avoid recursing.
import encodings.ascii
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE)
cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE)
# Copied from Python 3.2 tokenize
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
# Copied from Python 3.2 tokenize
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = _get_normal_name(matches[0])
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found:
if codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
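# A minimal usage sketch: a PEP 263 cookie on the first or second line is
# detected, otherwise the 'utf-8' default is returned.
if __name__ == '__main__':
    src = b"# -*- coding: iso-8859-1 -*-\nx = 1\n"
    encoding, first_lines = detect_encoding(io.BytesIO(src).readline)
    print(encoding, first_lines)  # 'iso-8859-1' and the raw cookie line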
def strip_encoding_cookie(filelike):
"""Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines.
"""
it = iter(filelike)
try:
first = next(it)
if not cookie_comment_re.match(first):
yield first
second = next(it)
if not cookie_comment_re.match(second):
yield second
except StopIteration:
return
for line in it:
yield line
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
"""Converts a bytes string with python source code to unicode.
Unicode strings are passed through unchanged. Byte strings are checked
for the python source file encoding cookie to determine encoding.
txt can be either a bytes buffer or a string containing the source
code.
"""
if isinstance(txt, six.text_type):
return txt
if isinstance(txt, six.binary_type):
buffer = io.BytesIO(txt)
else:
buffer = txt
try:
encoding, _ = detect_encoding(buffer.readline)
except SyntaxError:
encoding = "ascii"
buffer.seek(0)
newline_decoder = io.IncrementalNewlineDecoder(None, True)
text = io.TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
text.mode = 'r'
if skip_encoding_cookie:
return u"".join(strip_encoding_cookie(text))
else:
return text.read()
def decode_source(source_bytes):
"""Decode bytes representing source code and return the string.
Universal newline support is used in the decoding.
"""
# source_bytes_readline = io.BytesIO(source_bytes).readline
# encoding, _ = detect_encoding(source_bytes_readline)
newline_decoder = io.IncrementalNewlineDecoder(None, True)
return newline_decoder.decode(source_to_unicode(source_bytes))
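# A minimal usage sketch: a byte string is decoded using its encoding cookie,
# and the cookie line itself is stripped from the returned source by default.
if __name__ == '__main__':
    src = b"# -*- coding: iso-8859-1 -*-\nname = 'caf\xe9'\n"
    print(repr(source_to_unicode(src)))  # -> u"name = 'caf\xe9'\n", cookie line removed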
| {
"repo_name": "asmodehn/filefinder2",
"path": "filefinder2/_encoding_utils.py",
"copies": "1",
"size": "5398",
"license": "mit",
"hash": -2995716072463476700,
"line_mean": 32.3209876543,
"line_max": 83,
"alpha_frac": 0.6026306039,
"autogenerated": false,
"ratio": 4.213895394223263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5316525998123263,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""
A module for utils
"""
# We need to be extra careful with python versions
# Ref : https://docs.python.org/2/library/modules.html?highlight=imports
# Ref : https://docs.python.org/3/library/modules.html?highlight=imports
import os
import sys
def _verbose_message(message, *args, **kwargs):
"""Print the message to stderr if -v/PYTHONVERBOSE is turned on."""
verbosity = kwargs.pop('verbosity', 1)
if sys.flags.verbose >= verbosity:
if not message.startswith(('#', 'import ')):
message = '# ' + message
print(message.format(*args), file=sys.stderr)
# From importlib, just checking if ImportError accept name and path
try:
ImportError('msg', name='name', path='path')
except TypeError:
class _ImportError(ImportError):
"""Implementing Import Error with name and path args"""
def __init__(self, *args, **kwargs):
self.name = kwargs.pop('name', None)
self.path = kwargs.pop('path', None)
super(_ImportError, self).__init__(*args, **kwargs)
else:
_ImportError = ImportError
| {
"repo_name": "asmodehn/filefinder2",
"path": "filefinder2/_utils.py",
"copies": "1",
"size": "1147",
"license": "mit",
"hash": -4354857201007341000,
"line_mean": 32.7352941176,
"line_max": 72,
"alpha_frac": 0.6556233653,
"autogenerated": false,
"ratio": 3.941580756013746,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 34
} |
from __future__ import absolute_import, division, print_function
class ApprException(Exception):
status_code = 500
errorcode = "internal-error"
def __init__(self, message, payload=None):
super(ApprException, self).__init__()
self.payload = dict(payload or ())
self.message = message
def to_dict(self):
return {"code": self.errorcode, "message": self.message, "details": self.payload}
def __str__(self):
return self.message
class InvalidUsage(ApprException):
status_code = 400
errorcode = "invalid-usage"
class InvalidRelease(ApprException):
status_code = 422
errorcode = "invalid-release"
class InvalidParams(ApprException):
status_code = 422
errorcode = "invalid-parameters"
class PackageAlreadyExists(ApprException):
status_code = 409
errorcode = "package-exists"
class PackageNotFound(ApprException):
status_code = 404
errorcode = "package-not-found"
class ResourceNotFound(ApprException):
status_code = 404
errorcode = "resource-not-found"
class ChannelNotFound(ApprException):
status_code = 404
errorcode = "channel-not-found"
class Forbidden(ApprException):
status_code = 403
errorcode = "forbidden"
class PackageReleaseNotFound(ApprException):
status_code = 404
errorcode = "package-release-not-found"
class UnauthorizedAccess(ApprException):
status_code = 401
errorcode = "unauthorized-access"
class Unsupported(ApprException):
status_code = 501
errorcode = "unsupported"
class UnableToLockResource(ApprException):
status_code = 409
errorcode = "resource-in-use"
class InvalidVersion(ApprException):
status_code = 422
errorcode = "invalid-version"
class PackageVersionNotFound(ApprException):
status_code = 404
errorcode = "package-version-not-found"
def raise_package_not_found(package, release=None, media_type=None):
raise PackageNotFound("package %s doesn't exist, v: %s, type: %s" % (package, str(release),
str(media_type)),
{'package': package,
'release': release,
'media_type': media_type})
def raise_channel_not_found(package, channel=None, release=None):
if channel is None:
raise ChannelNotFound("No channel found for package '%s'" % (package), {
'package': package})
else:
raise ChannelNotFound("Channel '%s' doesn't exist for package '%s'" % (channel, package), {
'channel': channel,
'package': package,
'release': release})
def raise_package_exists(package, release, media_type):
raise PackageAlreadyExists("%s - %s - %s exists already " % (package, release, media_type), {
"package": package,
"release": release,
"media_type": media_type})
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/exception.py",
"copies": "2",
"size": "2930",
"license": "apache-2.0",
"hash": -1230750518896086800,
"line_mean": 24.9292035398,
"line_max": 99,
"alpha_frac": 0.6392491468,
"autogenerated": false,
"ratio": 3.970189701897019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005758101717857301,
"num_lines": 113
} |
from __future__ import absolute_import, division, print_function
class CurrencyAmount(object):
""" Amount of some given currency.
Protects against naively adding amounts in different currencies.
"""
# TODO Vectorize
def __init__(self, amount, currency):
self.amount = amount
self.currency = currency
def __str__(self):
return '{} {}'.format(self.amount, self.currency)
def __add__(self, other):
assert isinstance(other, CurrencyAmount),\
"Cannot add CurrencyAmount to anything but another CurrencyAmount"
if other.currency == self.currency:
amt = self.amount + other.amount
return CurrencyAmount(amount=amt, currency=self.currency)
else:
raise NotImplementedError("Adding of different currencies")
def __iadd__(self, other):
assert isinstance(other, CurrencyAmount),\
"Cannot add CurrencyAmount to anything but another CurrencyAmount"
if other.currency == self.currency:
self.amount += other.amount
return self
else:
raise NotImplementedError("Adding of different currencies")
def __sub__(self, other):
assert isinstance(other, CurrencyAmount), \
"Can only subtract two CurrencyAmount's"
if other.currency == self.currency:
amt = self.amount - other.amount
return CurrencyAmount(amount=amt, currency=self.currency)
else:
raise NotImplementedError("Adding of different currencies")
def __isub__(self, other):
assert isinstance(other, CurrencyAmount), \
"Can only subtract two CurrencyAmount's"
if other.currency == self.currency:
self.amount -= other.amount
return self
else:
raise NotImplementedError("Adding of different currencies")
def __mul__(self, other):
assert isinstance(other, (int, float)),\
"Can only multiply/divide a CurrencyAmount with a number"
amt = self.amount * other
return CurrencyAmount(amount=amt, currency=self.currency)
def __imul__(self, other):
assert isinstance(other, (int, float)), \
"Can only multiply/divide a CurrencyAmount with a number"
self.amount *= other
return self
def __truediv__(self, other):
assert isinstance(other, (int, float)), \
"Can only multiply/divide a CurrencyAmount with a number"
amt = self.amount / other
return CurrencyAmount(amount=amt, currency=self.currency)
def __idiv__(self, other):
assert isinstance(other, (int, float)), \
"Can only multiply/divide a CurrencyAmount with a number"
self.amount /= other
return self
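# A minimal usage sketch: amounts in the same currency combine arithmetically,
# while mixing currencies raises instead of silently adding raw numbers.
if __name__ == '__main__':
    total = CurrencyAmount(100.0, 'USD') + CurrencyAmount(25.0, 'USD')
    print(total)  # 125.0 USD
    try:
        total + CurrencyAmount(10.0, 'EUR')
    except NotImplementedError as exc:
        print('refused:', exc)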
class CurrencyWallet(object):
"""Dictionary of Currency (str) to CurrencyAmount
This allows one to sum up the values of assets, or cashflows in different
currencies without immediately converting them to a single currency given
a foreign exchange rate, or raise an exception.
"""
def __init__(self, ccy_amt=None):
self._wallet = {}
if ccy_amt is None:
return
elif isinstance(ccy_amt, CurrencyAmount):
self._wallet[ccy_amt.currency] = ccy_amt
else:
raise ValueError('Constructor takes a CurrencyAmount or None')
def __str__(self):
return str(self._wallet)
def __add__(self, other):
res = CurrencyWallet()
res._wallet = dict(self._wallet)
if other is None:
return res
elif isinstance(other, CurrencyAmount):
ccy = other.currency
if ccy in res._wallet:
# build a new CurrencyAmount so self's entries are not mutated
res._wallet[ccy] = res._wallet[ccy] + other
else:
res._wallet[ccy] = other
return res
elif isinstance(other, CurrencyWallet):
for ccy in other._wallet.keys():
if ccy in res._wallet:
res._wallet[ccy] = res._wallet[ccy] + other._wallet[ccy]
else:
res._wallet[ccy] = other._wallet[ccy]
return res
else:
raise ValueError('CurrencyWallet addition only works on '
'a CurrencyAmount or a CurrencyWallet')
def __iadd__(self, other):
if other is None:
return self
if isinstance(other, CurrencyAmount):
ccy = other.currency
if ccy in self._wallet:
self._wallet[ccy].amount += other.amount
else:
self._wallet[ccy] = other
return self
elif isinstance(other, CurrencyWallet):
for ccy in other._wallet.keys():
self += other._wallet[ccy]
return self
else:
raise ValueError('CurrencyWallet addition only works on '
'a CurrencyAmount or a CurrencyWallet')
| {
"repo_name": "caseyclements/pennies",
"path": "pennies/__init__.py",
"copies": "1",
"size": "4982",
"license": "apache-2.0",
"hash": 3026248165387718700,
"line_mean": 35.6323529412,
"line_max": 78,
"alpha_frac": 0.5895222802,
"autogenerated": false,
"ratio": 4.566452795600367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016711229946524066,
"num_lines": 136
} |
from __future__ import absolute_import, division, print_function
class FrozenError(AttributeError):
"""
A frozen/immutable instance or attribute have been attempted to be
modified.
It mirrors the behavior of ``namedtuples`` by using the same error message
and subclassing `AttributeError`.
.. versionadded:: 20.1.0
"""
msg = "can't set attribute"
args = [msg]
class FrozenInstanceError(FrozenError):
"""
A frozen instance has been attempted to be modified.
.. versionadded:: 16.1.0
"""
class FrozenAttributeError(FrozenError):
"""
A frozen attribute has been attempted to be modified.
.. versionadded:: 20.1.0
"""
class AttrsAttributeNotFoundError(ValueError):
"""
An ``attrs`` function couldn't find an attribute that the user asked for.
.. versionadded:: 16.2.0
"""
class NotAnAttrsClassError(ValueError):
"""
A non-``attrs`` class has been passed into an ``attrs`` function.
.. versionadded:: 16.2.0
"""
class DefaultAlreadySetError(RuntimeError):
"""
A default has been set using ``attr.ib()`` and is attempted to be reset
using the decorator.
.. versionadded:: 17.1.0
"""
class UnannotatedAttributeError(RuntimeError):
"""
A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
annotation.
.. versionadded:: 17.3.0
"""
class PythonTooOldError(RuntimeError):
"""
It was attempted to use an ``attrs`` feature that requires a newer Python
version.
.. versionadded:: 18.2.0
"""
class NotCallableError(TypeError):
"""
A ``attr.ib()`` requiring a callable has been set with a value
that is not callable.
.. versionadded:: 19.2.0
"""
def __init__(self, msg, value):
super(TypeError, self).__init__(msg, value)
self.msg = msg
self.value = value
def __str__(self):
return str(self.msg)
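# A minimal sketch of when FrozenInstanceError is raised, assuming the `attr`
# package providing this module is importable (the class below is illustrative).
if __name__ == '__main__':
    import attr

    @attr.s(frozen=True)
    class _Point(object):
        x = attr.ib()

    try:
        _Point(x=1).x = 2
    except attr.exceptions.FrozenInstanceError as exc:
        print(exc.msg)  # "can't set attribute"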
| {
"repo_name": "pantsbuild/pex",
"path": "pex/vendor/_vendored/attrs/attr/exceptions.py",
"copies": "9",
"size": "1950",
"license": "apache-2.0",
"hash": -1632437432992936700,
"line_mean": 20.1956521739,
"line_max": 78,
"alpha_frac": 0.6343589744,
"autogenerated": false,
"ratio": 4.12262156448203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 92
} |
from __future__ import absolute_import, division, print_function
class FrozenError(AttributeError):
"""
A frozen/immutable instance or attribute have been attempted to be
modified.
It mirrors the behavior of ``namedtuples`` by using the same error message
and subclassing `AttributeError`.
.. versionadded:: 20.1.0
"""
msg = "can't set attribute"
args = [msg]
class FrozenInstanceError(FrozenError):
"""
A frozen instance has been attempted to be modified.
.. versionadded:: 16.1.0
"""
class FrozenAttributeError(FrozenError):
"""
A frozen attribute has been attempted to be modified.
.. versionadded:: 20.1.0
"""
class AttrsAttributeNotFoundError(ValueError):
"""
An ``attrs`` function couldn't find an attribute that the user asked for.
.. versionadded:: 16.2.0
"""
class NotAnAttrsClassError(ValueError):
"""
A non-``attrs`` class has been passed into an ``attrs`` function.
.. versionadded:: 16.2.0
"""
class DefaultAlreadySetError(RuntimeError):
"""
A default has been set using ``attr.ib()`` and is attempted to be reset
using the decorator.
.. versionadded:: 17.1.0
"""
class UnannotatedAttributeError(RuntimeError):
"""
A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
annotation.
.. versionadded:: 17.3.0
"""
class PythonTooOldError(RuntimeError):
"""
It was attempted to use an ``attrs`` feature that requires a newer Python
version.
.. versionadded:: 18.2.0
"""
class NotCallableError(TypeError):
"""
A ``attr.ib()`` requiring a callable has been set with a value
that is not callable.
.. versionadded:: 19.2.0
"""
def __init__(self, msg, value):
super(TypeError, self).__init__(msg, value)
self.msg = msg
self.value = value
def __str__(self):
return str(self.msg)
| {
"repo_name": "python-attrs/attrs",
"path": "src/attr/exceptions.py",
"copies": "2",
"size": "1949",
"license": "mit",
"hash": 1371171942782781400,
"line_mean": 20.1847826087,
"line_max": 78,
"alpha_frac": 0.6341713699,
"autogenerated": false,
"ratio": 4.129237288135593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5763408658035593,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
class Identifier(object):
@property
def valid(self):
return False
def to_dict(self):
raise NotImplementedError
def to_frozenset(self, data=None):
if data is None:
data = self.to_dict()
if type(data) is dict:
data = data.items()
result = []
for item in data:
if type(item) is tuple and len(item) == 2:
key, value = item
else:
key = None
value = item
if type(value) is dict:
value = self.to_frozenset(value)
if type(value) is list:
value = self.to_frozenset(value)
if key is not None:
result.append((key, value))
else:
result.append(value)
return frozenset(result)
def __hash__(self):
return hash(self.to_frozenset())
def __eq__(self, other):
if not other:
return False
return self.__hash__() == other.__hash__()
def __ne__(self, other):
return not(self == other)
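# A minimal sketch of a concrete subclass (illustrative, not part of this
# module): to_frozenset() recursively freezes the dict from to_dict(), so
# identifiers with equal contents hash equally and can be used as dict keys.
class _ExampleIdentifier(Identifier):
    def __init__(self, service, key):
        self.service = service
        self.key = key

    def to_dict(self):
        return {'service': self.service, 'key': self.key}

# _ExampleIdentifier('tvdb', 1) == _ExampleIdentifier('tvdb', 1)  -> True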
| {
"repo_name": "OpenEntityMap/oem-client",
"path": "oem/media/core/base/identifier.py",
"copies": "1",
"size": "1178",
"license": "bsd-3-clause",
"hash": -4771432578384559000,
"line_mean": 22.0980392157,
"line_max": 64,
"alpha_frac": 0.4974533107,
"autogenerated": false,
"ratio": 4.445283018867925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5442736329567924,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
class MantidScriptHandler(object):
script = None
def __init__(self, parameters=None, run=None):
script = "SNSPowderReduction( Filename = '{}',\n \
MaxChunkSize = {}, \n \
PreserveEvents = {}, \n \
PushDataPositive = '{}', \n \
CalibrationFile = '{}', \n \
CharacterizationRunsFile = '{}', \n \
BackgroundNumber = '{}', \n \
VanadiumNumber = '{}', \n \
VanadiumBackgroundNumber = '{}', \n \
ExpIniFileName = '{}', \n \
RemovePromptPulseWidth = {}, \n \
ResampleX = {}, \n \
BinInDSpace = {}, \n \
FilterBadPulses = {}, \n \
CropWavelengthMin = {}, \n \
CropWavelengthMax = {}, \n \
SaveAs = '{}', \n \
OutputDirectory = '{}', \n \
StripVanadiumPeaks = {}, \n \
VanadiumRadius = {}, \n \
NormalizeByCurrent = {}, \n \
FinalDataUnits = '{}')".format(run,
parameters['max_chunk_size'],
parameters['preserve_events'],
parameters['push_data_positive'],
parameters['calibration_file'],
parameters['characterization_file'],
parameters['background_number'],
parameters['vanadium_number'],
parameters['vanadium_background_number'],
parameters['exp_ini_filename'],
parameters['remove_prompt_pulse_width'],
parameters['resamplex'],
parameters['bin_in_d_space'],
parameters['filter_bad_pulses'],
parameters['crop_wavelength_min'],
parameters['crop_wavelength_max'],
parameters['save_as'],
parameters['output_directory'],
parameters['strip_vanadium_peaks'],
parameters['vanadium_radius'],
parameters['normalize_by_current'],
parameters['final_data_units'])
self.script = script
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/idl/mantid_script_handler.py",
"copies": "1",
"size": "2548",
"license": "mit",
"hash": 2212594812738616600,
"line_mean": 46.1851851852,
"line_max": 80,
"alpha_frac": 0.4148351648,
"autogenerated": false,
"ratio": 5.352941176470588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6267776341270589,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
class Namespaces(object):
"""
Class for holding and manipulating a dictionary containing the various namespaces for
each standard.
"""
namespace_dict = {
'atom' : 'http://www.w3.org/2005/Atom',
'csw' : 'http://www.opengis.net/cat/csw/2.0.2',
'dc' : 'http://purl.org/dc/elements/1.1/',
'dct' : 'http://purl.org/dc/terms/',
'dif' : 'http://gcmd.gsfc.nasa.gov/Aboutus/xml/dif/',
'draw' : 'gov.usgs.cida.gdp.draw',
'fes' : 'http://www.opengis.net/fes/2.0',
'fgdc' : 'http://www.opengis.net/cat/csw/csdgm',
'gco' : 'http://www.isotc211.org/2005/gco',
'gmd' : 'http://www.isotc211.org/2005/gmd',
'gmi' : 'http://www.isotc211.org/2005/gmi',
'gml' : 'http://www.opengis.net/gml',
'gml311': 'http://www.opengis.net/gml',
'gml32' : 'http://www.opengis.net/gml/3.2',
'gmx' : 'http://www.isotc211.org/2005/gmx',
'gts' : 'http://www.isotc211.org/2005/gts',
'ogc' : 'http://www.opengis.net/ogc',
'om' : 'http://www.opengis.net/om/1.0',
'om10' : 'http://www.opengis.net/om/1.0',
'om100' : 'http://www.opengis.net/om/1.0',
'om20' : 'http://www.opengis.net/om/2.0',
'ows' : 'http://www.opengis.net/ows',
'ows100': 'http://www.opengis.net/ows',
'ows110': 'http://www.opengis.net/ows/1.1',
'ows200': 'http://www.opengis.net/ows/2.0',
'rim' : 'urn:oasis:names:tc:ebxml-regrep:xsd:rim:3.0',
'rdf' : 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'sml' : 'http://www.opengis.net/sensorML/1.0.1',
'sml101': 'http://www.opengis.net/sensorML/1.0.1',
'sos' : 'http://www.opengis.net/sos/1.0',
'sos20' : 'http://www.opengis.net/sos/2.0',
'srv' : 'http://www.isotc211.org/2005/srv',
'swe' : 'http://www.opengis.net/swe/1.0.1',
'swe10' : 'http://www.opengis.net/swe/1.0',
'swe101': 'http://www.opengis.net/swe/1.0.1',
'swe20' : 'http://www.opengis.net/swe/2.0',
'swes' : 'http://www.opengis.net/swes/2.0',
'tml' : 'http://www.opengis.net/tml',
'wfs' : 'http://www.opengis.net/wfs',
'wfs20' : 'http://www.opengis.net/wfs/2.0',
'wcs' : 'http://www.opengis.net/wcs',
'wps' : 'http://www.opengis.net/wps/1.0.0',
'wps100': 'http://www.opengis.net/wps/1.0.0',
'xlink' : 'http://www.w3.org/1999/xlink',
'xs' : 'http://www.w3.org/2001/XMLSchema',
'xs2' : 'http://www.w3.org/XML/Schema',
'xsi' : 'http://www.w3.org/2001/XMLSchema-instance'
}
def get_namespace(self, key):
"""
Retrieves a namespace from the dictionary
Example:
--------
>>> from owslib.namespaces import Namespaces
>>> ns = Namespaces()
>>> ns.get_namespace('csw')
'http://www.opengis.net/cat/csw/2.0.2'
>>> ns.get_namespace('wfs20')
'http://www.opengis.net/wfs/2.0'
"""
retval = None
if key in self.namespace_dict.keys():
retval = self.namespace_dict[key]
return retval
def get_versioned_namespace(self, key, ver=None):
"""
Retrieves a namespace from the dictionary with a specific version number
Example:
--------
>>> from owslib.namespaces import Namespaces
>>> ns = Namespaces()
>>> ns.get_versioned_namespace('ows')
'http://www.opengis.net/ows'
>>> ns.get_versioned_namespace('ows','1.1.0')
'http://www.opengis.net/ows/1.1'
"""
if ver is None:
return self.get_namespace(key)
version = ''
# Strip the decimals out of the passed in version
for s in ver.split('.'):
version += s
key += version
retval = None
if key in self.namespace_dict.keys():
retval = self.namespace_dict[key]
return retval
def get_namespaces(self, keys=None):
"""
Retrieves a dict of namespaces from the namespace mapping
Parameters
----------
- keys: List of keys query and return
Example:
--------
>>> ns = Namespaces()
>>> x = ns.get_namespaces(['csw','gmd'])
>>> x == {'csw': 'http://www.opengis.net/cat/csw/2.0.2', 'gmd': 'http://www.isotc211.org/2005/gmd'}
True
>>> x = ns.get_namespaces('csw')
>>> x == {'csw': 'http://www.opengis.net/cat/csw/2.0.2'}
True
>>> ns.get_namespaces()
{...}
"""
# If we aren't looking for any namespaces in particular return the whole dict
if keys is None or len(keys) == 0:
return self.namespace_dict
if isinstance(keys, (str, type(u''))):  # str on Python 3; str or unicode on Python 2
return { keys: self.get_namespace(keys) }
retval = {}
for key in keys:
retval[key] = self.get_namespace(key)
return retval
def get_namespace_from_url(self, url):
for k, v in self.namespace_dict.items():
if v == url:
return k
return None
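# Hedged illustration (added comments, not part of the original module): the
# reverse lookup returns a prefix mapped to the given URL; when several
# prefixes share one URL (e.g. 'gml' and 'gml311') the choice is arbitrary.
# Namespaces().get_namespace_from_url('http://www.opengis.net/wfs/2.0')
# returns 'wfs20', the only prefix bound to that URL.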
| {
"repo_name": "b-cube/pipeline-demo",
"path": "demo/bcube_owslib/namespaces.py",
"copies": "3",
"size": "5547",
"license": "mit",
"hash": 7708621025525921000,
"line_mean": 36.7346938776,
"line_max": 111,
"alpha_frac": 0.4979268073,
"autogenerated": false,
"ratio": 3.1023489932885906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5100275800588591,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
class serviceBase(object):
def __init__(self, session_factory, debug=False):
"""Must send in either a session_factory."""
self._session_factory = session_factory
self._session = self._session_factory.getSession()
self._version = session_factory.version
self._debug = debug
def getSession(self):
if self._session is None:
self._session = self._session_factory.getSession()
return self._session
def reset_session(self):
self._session = self._session_factory.getSession()
class Base(object):
from sqlalchemy.ext.declarative import declared_attr
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {u'schema': 'odm2'}
def __init__(self, *args, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
def __eq__(self, other):
return self.__dict__ == other.__dict__
    def __repr__(self):
        valuedict = self.__dict__.copy()
        # iterate over a snapshot of the keys: deleting from a dict while
        # iterating its live key view raises RuntimeError on Python 3
        for v in list(valuedict):
            if 'obj' in v.lower() or v == '_sa_instance_state':
                del valuedict[v]
        return '<%s(%s)>' % (self.__class__.__name__, str(valuedict))
class modelBase(object):
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base(cls=Base)
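# Hedged usage sketch (added comments; the model and column names below are
# illustrative, not part of the ODM2 schema): declarative models built on
# modelBase.Base accept column values as keyword arguments via Base.__init__
# and compare equal when their attribute dictionaries match.
#
#     from sqlalchemy import Column, Integer, String
#     class People(modelBase.Base):
#         personid = Column(Integer, primary_key=True)
#         personfirstname = Column(String)
#     p = People(personfirstname='Ada')   # sets the attribute via kwargs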
| {
"repo_name": "ODM2/ODM2PythonAPI",
"path": "odm2api/base.py",
"copies": "2",
"size": "1491",
"license": "bsd-3-clause",
"hash": -6934053094602447000,
"line_mean": 27.1320754717,
"line_max": 69,
"alpha_frac": 0.5895372233,
"autogenerated": false,
"ratio": 4.018867924528302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5608405147828301,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
class Step1Utilities(object):
def __init__(self, main_window=None):
self.main_window = main_window
    def is_diamond_text_empty(self):
        _diamond_field = str(self.main_window.autonom_ui.diamond.text()).strip().replace(" ", "")
        return _diamond_field == ""
    def is_diamond_background_text_empty(self):
        _diamond_background_field = str(self.main_window.autonom_ui.diamond_background.text()).strip().replace(" ", "")
        return _diamond_background_field == ""
    def is_vanadium_text_empty(self):
        _vanadium_field = str(self.main_window.autonom_ui.vanadium.text()).strip().replace(" ", "")
        return _vanadium_field == ""
    def is_vanadium_background_text_empty(self):
        _vanadium_background_field = str(self.main_window.autonom_ui.vanadium_background.text()).strip().replace(" ", "")
        return _vanadium_background_field == ""
    def is_sample_background_text_empty(self):
        _sample_background_field = str(self.main_window.autonom_ui.sample_background.text()).strip().replace(" ", "")
        return _sample_background_field == ""
def is_create_folder_button_status_ok(self):
if self.main_window.autonom_ui.create_folder_button.isChecked():
if self.main_window.autonom_ui.manual_output_folder.isChecked() \
and (str(self.main_window.autonom_ui.manual_output_folder_field.text()).strip() == ""):
return False
else:
return True
else:
return True
| {
"repo_name": "neutrons/FastGR",
"path": "addie/autoNOM/step1_utilities.py",
"copies": "1",
"size": "1894",
"license": "mit",
"hash": -6315398265485473000,
"line_mean": 35.4230769231,
"line_max": 121,
"alpha_frac": 0.5876451954,
"autogenerated": false,
"ratio": 3.8574338085539717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9939798479875825,
"avg_score": 0.0010561048156293444,
"num_lines": 52
} |
from __future__ import absolute_import, division, print_function
class VisitorHistory(object):
"""
``VisitorHistory`` objects store fully or partially aggregated state about
the previous events related to a given visitor. Instances of this class are
intended to be stored with a reference to the visitor, like in a key/value
    store keyed by visitor ID.
"""
def __init__(self):
self.nonbot = False
self.nonbot_queue = []
self.goals = set()
self.variants = set()
self.ips = set()
self.user_agents = set()
# Set of (goal name, rollup key, bucket start) that have already been
# counted.
self.conversion_keys = set()
# Set of (test name, selected, rollup key, bucket start) that have
# already been counted.
self.impression_keys = set()
# Set of (test name, selected, goal name, rollup key, bucket start)
# that have already been counted.
self.variant_conversion_keys = set()
# Dict mapping complex goal name to a list of previous conversion keys
# on that complex goal.
self.complex_keys = {}
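def _visitor_history_example():
    # Hedged usage sketch (added example; the key tuple is illustrative):
    # histories are fetched by visitor id, mutated as events arrive, and the
    # dedup sets keep replayed events from being counted twice.
    history = VisitorHistory()
    key = ('signup', 'all', 1388534400)
    first_seen = key not in history.conversion_keys
    history.conversion_keys.add(key)
    return first_seen  # True only the first time this key is recorded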
class Test(object):
"""
Stores denormalized information about a particular test. Instances of this
    class are intended to be stored with a reference to the test name, like in
a key/value store keyed by test name.
"""
def __init__(self, first_timestamp=None, last_timestamp=None,
variants=None):
self.first_timestamp = first_timestamp
self.last_timestamp = last_timestamp
self.variants = variants or set()
class Goal(object):
"""
Stores denormalized information about a particular goal. Instances of this
    class are intended to be stored with a reference to the goal name, like in
a key/value store keyed by goal name.
"""
def __init__(self, value_type=None, value_format=None):
self.value_type = value_type
self.value_format = value_format
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/backend/model.py",
"copies": "1",
"size": "2015",
"license": "mit",
"hash": 8321024497616162000,
"line_mean": 34.350877193,
"line_max": 79,
"alpha_frac": 0.6461538462,
"autogenerated": false,
"ratio": 4.278131634819533,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5424285481019533,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from collections import defaultdict
from future.builtins import zip
import numpy as np
from skbio.tree import TreeNode
def _walk_clades(trees, weights):
"""Walk all the clades of all the trees
Parameters
----------
trees : list of TreeNode
The trees to walk
weights : np.array
Tree weights
Returns
-------
list of tuple
The clades and support values sorted by support value such that the
most supported clade is index 0. The tuples are of the form:
(frozenset, float).
defaultdict(float)
The edge lengths, keyed by frozenset of the clade, and valued by the
weighted average length of the clade by the trees the clade was
observed in.
"""
clade_counts = defaultdict(float)
edge_lengths = defaultdict(float)
total = weights.sum()
# get clade counts
tipnames_f = lambda n: [n.name] if n.is_tip() else []
for tree, weight in zip(trees, weights):
tree.cache_attr(tipnames_f, 'tip_names', frozenset)
for node in tree.postorder():
tip_names = node.tip_names
# if node.length is not None, fetch it and weight it
length = node.length * weight if node.length is not None else None
clade_counts[tip_names] += weight
if length is None:
edge_lengths[tip_names] = None
else:
edge_lengths[tip_names] += length / total
    # sort clades by size (number of tips), largest first
clade_counts = sorted(clade_counts.items(), key=lambda x: len(x[0]),
reverse=True)
return clade_counts, edge_lengths
def _filter_clades(clade_counts, cutoff_threshold):
"""Filter clades that not well supported or are contradicted
Parameters
----------
clade_counts : list of tuple
Where the first element in each tuple is the frozenset of the clade,
and the second element is the support value. It is expected that this
list is sorted by descending order by support.
cutoff_threshold : float
The minimum weighted observation count that a clade must have to be
considered supported.
Returns
-------
dict
A dict of the accepted clades, keyed by the frozenset of the clade and
valued by the support value.
"""
accepted_clades = {}
for clade, count in clade_counts:
conflict = False
if count <= cutoff_threshold:
continue
if len(clade) > 1:
# check the current clade against all the accepted clades to see if
# it conflicts. A conflict is defined as:
# 1. the clades are not disjoint
# 2. neither clade is a subset of the other
for accepted_clade in accepted_clades:
intersect = clade.intersection(accepted_clade)
subset = clade.issubset(accepted_clade)
superset = clade.issuperset(accepted_clade)
if intersect and not (subset or superset):
conflict = True
if conflict is False:
accepted_clades[clade] = count
return accepted_clades
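# Hedged illustration (added example, not part of the original module): the
# conflict rule above can be exercised with plain frozensets, assuming the
# input is sorted by descending support as the docstring requires.
def _filter_clades_example():
    counts = [(frozenset('ABCD'), 4.0),
              (frozenset('AB'), 3.0),
              (frozenset('BC'), 2.0)]
    # {B, C} overlaps {A, B} but neither contains the other, so it conflicts
    # and is dropped; the nested clades survive.
    assert _filter_clades(counts, 1.0) == {frozenset('ABCD'): 4.0,
                                           frozenset('AB'): 3.0}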
def _build_trees(clade_counts, edge_lengths, support_attr):
"""Construct the trees with support
Parameters
----------
clade_counts : dict
Keyed by the frozenset of the clade and valued by the support
edge_lengths : dict
Keyed by the frozenset of the clade and valued by the weighted length
support_attr : str
The name of the attribute to hold the support value
Returns
-------
list of TreeNode
A list of the constructed trees
"""
nodes = {}
queue = [(len(clade), clade) for clade in clade_counts]
while queue:
        # The queue contents are rebuilt on each iteration, so re-sort every
        # pass rather than trying to maintain a sorted structure.
queue.sort()
(clade_size, clade) = queue.pop(0)
new_queue = []
# search for ancestors of clade
for (_, ancestor) in queue:
if clade.issubset(ancestor):
# update ancestor such that, in the following example:
# ancestor == {1, 2, 3, 4}
# clade == {2, 3}
# new_ancestor == {1, {2, 3}, 4}
new_ancestor = (ancestor - clade) | frozenset([clade])
# update references for counts and lengths
clade_counts[new_ancestor] = clade_counts.pop(ancestor)
edge_lengths[new_ancestor] = edge_lengths.pop(ancestor)
ancestor = new_ancestor
new_queue.append((len(ancestor), ancestor))
# if the clade is a tip, then we have a name
if clade_size == 1:
name = list(clade)[0]
else:
name = None
# the clade will not be in nodes if it is a tip
children = [nodes.pop(c) for c in clade if c in nodes]
length = edge_lengths[clade]
node = TreeNode(children=children, length=length, name=name)
setattr(node, support_attr, clade_counts[clade])
nodes[clade] = node
queue = new_queue
return list(nodes.values())
def majority_rule(trees, weights=None, cutoff=0.5, support_attr='support'):
r"""Determines consensus trees from a list of rooted trees
Parameters
----------
trees : list of TreeNode
The trees to operate on
weights : list or np.array of {int, float}, optional
If provided, the list must be in index order with `trees`. Each tree
will receive the corresponding weight. If omitted, all trees will be
equally weighted.
cutoff : float, 0.0 <= cutoff <= 1.0
Any clade that has <= cutoff support will be dropped. If cutoff is
< 0.5, then it is possible that ties will result. If so, ties are
broken arbitrarily depending on list sort order.
support_attr : str
The attribute to be decorated onto the resulting trees that contain the
consensus support.
Returns
-------
list of TreeNode
Multiple trees can be returned in the case of two or more disjoint sets
of tips represented on input.
Notes
-----
This code was adapted from PyCogent's majority consensus code originally
written by Matthew Wakefield. The method is based off the original
description of consensus trees in [1]_. An additional description can be
found in the Phylip manual [2]_. This method does not support majority rule
extended.
    Support is computed as the sum of the weights of the trees in which the
    clade was observed. For instance, if {A, B, C} was observed in 5 trees
all with a weight of 1, its support would then be 5.
References
----------
.. [1] Margush T, McMorris FR. (1981) "Consensus n-trees." Bulletin for
Mathematical Biology 43(2) 239-44.
.. [2] http://evolution.genetics.washington.edu/phylip/doc/consense.html
Examples
--------
Computing the majority consensus, using the example from the Phylip manual
with the exception that we are computing majority rule and not majority
rule extended.
>>> from skbio.tree import TreeNode
>>> trees = [
... TreeNode.from_newick("(A,(B,(H,(D,(J,(((G,E),(F,I)),C))))));"),
... TreeNode.from_newick("(A,(B,(D,((J,H),(((G,E),(F,I)),C)))));"),
... TreeNode.from_newick("(A,(B,(D,(H,(J,(((G,E),(F,I)),C))))));"),
... TreeNode.from_newick("(A,(B,(E,(G,((F,I),((J,(H,D)),C))))));"),
... TreeNode.from_newick("(A,(B,(E,(G,((F,I),(((J,H),D),C))))));"),
... TreeNode.from_newick("(A,(B,(E,((F,I),(G,((J,(H,D)),C))))));"),
... TreeNode.from_newick("(A,(B,(E,((F,I),(G,(((J,H),D),C))))));"),
... TreeNode.from_newick("(A,(B,(E,((G,(F,I)),((J,(H,D)),C)))));"),
... TreeNode.from_newick("(A,(B,(E,((G,(F,I)),(((J,H),D),C)))));")]
>>> consensus = majority_rule(trees, cutoff=0.5)[0]
>>> print(consensus.ascii_art())
/-E
|
| /-G
/--------| |
| | | /-F
| | |---------|
| \--------| \-I
| |
| | /-C
/--------| | |
| | \--------| /-D
| | | |
| | \--------|--J
---------| | |
| | \-H
| |
| \-B
|
\-A
>>> for node in consensus.non_tips():
... support_value = node.support
... names = ' '.join([n.name for n in node.tips()])
... print("Tips: %s, support: %s" % (names, support_value))
Tips: F I, support: 9.0
Tips: D J H, support: 6.0
Tips: C D J H, support: 6.0
Tips: G F I C D J H, support: 6.0
Tips: E G F I C D J H, support: 9.0
Tips: E G F I C D J H B, support: 9.0
In the next example, multiple trees will be returned which can happen if
clades are not well supported across the trees. In addition, this can arise
if not all tips are present across all trees.
>>> trees = [
... TreeNode.from_newick("((a,b),(c,d),(e,f))"),
... TreeNode.from_newick("(a,(c,d),b,(e,f))"),
... TreeNode.from_newick("((c,d),(e,f),b)"),
... TreeNode.from_newick("(a,(c,d),(e,f))")]
>>> consensus_trees = majority_rule(trees)
>>> print(len(consensus_trees))
4
>>> for tree in consensus_trees:
... print(tree.ascii_art())
--b
--a
/-f
---------|
\-e
/-d
---------|
\-c
"""
if weights is None:
weights = np.ones(len(trees), dtype=float)
else:
weights = np.asarray(weights)
if len(weights) != len(trees):
raise ValueError("Number of weights and trees differ!")
cutoff_threshold = cutoff * weights.sum()
clade_counts, edge_lengths = _walk_clades(trees, weights)
clade_counts = _filter_clades(clade_counts, cutoff_threshold)
trees = _build_trees(clade_counts, edge_lengths, support_attr)
return trees
| {
"repo_name": "JWDebelius/scikit-bio",
"path": "skbio/tree/_majority_rule.py",
"copies": "2",
"size": "11013",
"license": "bsd-3-clause",
"hash": 9112306109940162000,
"line_mean": 34.8729641694,
"line_max": 79,
"alpha_frac": 0.5386361573,
"autogenerated": false,
"ratio": 3.962936308024469,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00005616084465910367,
"num_lines": 307
} |
from __future__ import absolute_import, division, print_function
def filter_hdulist_by_shape(hdulist, use_hdu='all'):
"""
Remove empty HDUs, and ensure that all HDUs can be
    packed into a single Data object (i.e. have the same shape)
    Parameters
    ----------
    hdulist : HDUList
        The HDUs to filter
    use_hdu : 'all' or list of integers (optional)
        Which HDUs to use
    Returns
    -------
    a list of the valid HDUs
"""
from ...external.astro import fits
# If only a subset are requested, extract those
if use_hdu != 'all':
hdulist = [hdulist[hdu] for hdu in use_hdu]
# Now only keep HDUs that are not tables or empty.
valid_hdus = []
for hdu in hdulist:
        if (isinstance(hdu, (fits.PrimaryHDU, fits.ImageHDU))
                and hdu.data is not None):
            valid_hdus.append(hdu)
    # Check that the dimensions of all HDUs are the same
    # (HDUs without data were already filtered out above)
reference_shape = valid_hdus[0].data.shape
for hdu in valid_hdus:
if hdu.data.shape != reference_shape:
raise Exception("HDUs are not all the same dimensions")
return valid_hdus
def extract_data_fits(filename, use_hdu='all'):
'''
Extract non-tabular HDUs from a FITS file. If `use_hdu` is 'all', then
all non-tabular HDUs are extracted, otherwise only the ones specified
by `use_hdu` are extracted (`use_hdu` should then contain a list of
integers). If the requested HDUs do not have the same dimensions, an
Exception is raised.
'''
from ...external.astro import fits
# Read in all HDUs
hdulist = fits.open(filename, ignore_blank=True)
    hdulist = filter_hdulist_by_shape(hdulist, use_hdu=use_hdu)
# Extract data
arrays = {}
for hdu in hdulist:
arrays[hdu.name] = hdu.data
return arrays
def extract_hdf5_datasets(handle):
'''
Recursive function that returns a dictionary with all the datasets
found in an HDF5 file or group. `handle` should be an instance of
h5py.highlevel.File or h5py.highlevel.Group.
'''
import h5py
datasets = {}
for group in handle:
if isinstance(handle[group], h5py.highlevel.Group):
sub_datasets = extract_hdf5_datasets(handle[group])
for key in sub_datasets:
datasets[key] = sub_datasets[key]
elif isinstance(handle[group], h5py.highlevel.Dataset):
datasets[handle[group].name] = handle[group]
return datasets
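def _extract_hdf5_example(filename='example.h5'):
    # Hedged usage sketch (added example; the filename is illustrative): walk
    # a file and read every dataset into memory before the handle closes.
    import h5py
    with h5py.File(filename, 'r') as handle:
        datasets = extract_hdf5_datasets(handle)
        return dict((key, datasets[key].value) for key in datasets)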
def extract_data_hdf5(filename, use_datasets='all'):
'''
Extract non-tabular datasets from an HDF5 file. If `use_datasets` is
'all', then all non-tabular datasets are extracted, otherwise only the
ones specified by `use_datasets` are extracted (`use_datasets` should
then contain a list of paths). If the requested datasets do not have
the same dimensions, an Exception is raised.
'''
import h5py
# Open file
file_handle = h5py.File(filename, 'r')
    # Read in all datasets
datasets = extract_hdf5_datasets(file_handle)
# Only keep non-tabular datasets
remove = []
for key in datasets:
if datasets[key].dtype.fields is not None:
remove.append(key)
for key in remove:
datasets.pop(key)
# Check that dimensions of all datasets are the same
reference_shape = datasets[list(datasets.keys())[0]].value.shape
for key in datasets:
if datasets[key].value.shape != reference_shape:
raise Exception("Datasets are not all the same dimensions")
# Extract data
arrays = {}
for key in datasets:
arrays[key] = datasets[key].value
# Close HDF5 file
file_handle.close()
return arrays
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/data_factories/io.py",
"copies": "1",
"size": "3727",
"license": "bsd-3-clause",
"hash": 4740536820061952000,
"line_mean": 28.816,
"line_max": 74,
"alpha_frac": 0.6452911189,
"autogenerated": false,
"ratio": 3.7419678714859437,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9885282519797709,
"avg_score": 0.0003952941176470588,
"num_lines": 125
} |
from __future__ import absolute_import, division, print_function
'''
From https://github.com/fchollet/deep-learning-models
'''
import json
from keras.utils.data_utils import get_file
from keras import backend as K
import numpy as np
CLASS_INDEX = None
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json'
def preprocess_input(x, dim_ordering='default', mean=None, std=None):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
assert dim_ordering in {'tf', 'th'}
if mean is not None:
x = x - np.array(mean, dtype='float32')
if std is not None:
if 0.0 in std:
raise ValueError('0 is not allowed as a custom std.')
x = x / np.array(std, dtype='float32')
if mean is None and std is None:
if dim_ordering == 'th':
x[:, 0, :, :] -= 103.939
x[:, 1, :, :] -= 116.779
x[:, 2, :, :] -= 123.68
# 'RGB'->'BGR'
x = x[:, ::-1, :, :]
else:
x[:, :, :, 0] -= 103.939
x[:, :, :, 1] -= 116.779
x[:, :, :, 2] -= 123.68
# 'RGB'->'BGR'
x = x[:, :, :, ::-1]
return x
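def _preprocess_example():
    # Hedged usage sketch (added example; shapes and values illustrative):
    # zero-center a random RGB batch in 'tf' ordering with the default
    # ImageNet channel means above; the result comes back in BGR order.
    batch = np.random.rand(2, 224, 224, 3).astype('float32') * 255.0
    return preprocess_input(batch, dim_ordering='tf')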
def decode_imagenet_predictions(preds, top=5):
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 1000:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 1000)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = get_file('imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models')
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
results = []
for pred in preds:
top_indices = pred.argsort()[-top:][::-1]
result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
results.append(result)
return results
| {
"repo_name": "keplr-io/quiver",
"path": "quiver_engine/imagenet_utils.py",
"copies": "1",
"size": "2049",
"license": "mit",
"hash": 1149729683121910100,
"line_mean": 31.5238095238,
"line_max": 105,
"alpha_frac": 0.5275744265,
"autogenerated": false,
"ratio": 3.5696864111498257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45972608376498253,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import datashape
from datashape import DataShape, Option, Record, Unit, dshape, var, Fixed, Var
from datashape.predicates import isscalar, iscollection, isrecord
from toolz import (
isdistinct, frequencies, concat as tconcat, unique, get, first,
)
from odo.utils import copydoc
from .core import common_subexpression
from .expressions import Expr, ElemWise, label, Field
from .expressions import dshape_method_list
from ..compatibility import zip_longest, _strtypes
__all__ = ['Sort', 'Distinct', 'Head', 'Merge', 'IsIn', 'isin', 'distinct',
'merge', 'head', 'sort', 'Join', 'join', 'transform', 'Concat',
'concat', 'Tail', 'tail']
class Sort(Expr):
""" Table in sorted order
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.sort('amount', ascending=False).schema
dshape("{name: string, amount: int32}")
Some backends support sorting by arbitrary rowwise tables, e.g.
>>> accounts.sort(-accounts.amount) # doctest: +SKIP
"""
__slots__ = '_hash', '_child', '_key', 'ascending'
@property
def dshape(self):
return self._child.dshape
@property
def key(self):
        if self._key == () or self._key is None:
return self._child.fields[0]
if isinstance(self._key, tuple):
return list(self._key)
else:
return self._key
def _len(self):
return self._child._len()
@property
def _name(self):
return self._child._name
def __str__(self):
return "%s.sort(%s, ascending=%s)" % (self._child, repr(self._key),
self.ascending)
def sort(child, key=None, ascending=True):
""" Sort a collection
Parameters
----------
key : str, list of str, or Expr
Defines by what you want to sort.
* A single column string: ``t.sort('amount')``
* A list of column strings: ``t.sort(['name', 'amount'])``
* An expression: ``t.sort(-t.amount)``
ascending : bool, optional
Determines order of the sort
"""
if not isrecord(child.dshape.measure):
key = None
if isinstance(key, list):
key = tuple(key)
return Sort(child, key, ascending)
class Distinct(Expr):
""" Remove duplicate elements from an expression
Parameters
----------
on : tuple of :class:`~blaze.expr.expressions.Field`
The subset of fields or names of fields to be distinct on.
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int, id: int}')
>>> e = distinct(t)
>>> data = [('Alice', 100, 1),
... ('Bob', 200, 2),
... ('Alice', 100, 1)]
>>> from blaze.compute.python import compute
>>> sorted(compute(e, data))
[('Alice', 100, 1), ('Bob', 200, 2)]
Use a subset by passing `on`:
>>> import pandas as pd
>>> e = distinct(t, 'name')
>>> data = pd.DataFrame([['Alice', 100, 1],
... ['Alice', 200, 2],
... ['Bob', 100, 1],
... ['Bob', 200, 2]],
... columns=['name', 'amount', 'id'])
>>> compute(e, data)
name amount id
0 Alice 100 1
1 Bob 100 1
"""
__slots__ = '_hash', '_child', 'on'
@property
def dshape(self):
return datashape.var * self._child.dshape.measure
@property
def fields(self):
return self._child.fields
@property
def _name(self):
return self._child._name
def __str__(self):
return 'distinct({child}{on})'.format(
child=self._child,
on=(', ' if self.on else '') + ', '.join(map(str, self.on))
)
@copydoc(Distinct)
def distinct(expr, *on):
fields = frozenset(expr.fields)
_on = []
append = _on.append
for n in on:
if isinstance(n, Field):
if n._child.isidentical(expr):
n = n._name
else:
raise ValueError('{0} is not a field of {1}'.format(n, expr))
if not isinstance(n, _strtypes):
raise TypeError('on must be a name or field, not: {0}'.format(n))
elif n not in fields:
raise ValueError('{0} is not a field of {1}'.format(n, expr))
append(n)
return Distinct(expr, tuple(_on))
class _HeadOrTail(Expr):
__slots__ = '_hash', '_child', 'n'
@property
def dshape(self):
return self.n * self._child.dshape.subshape[0]
def _len(self):
return min(self._child._len(), self.n)
@property
def _name(self):
return self._child._name
def __str__(self):
return '%s.%s(%d)' % (self._child, type(self).__name__.lower(), self.n)
class Head(_HeadOrTail):
""" First `n` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.head(5).dshape
dshape("5 * {name: string, amount: int32}")
See Also
--------
blaze.expr.collections.Tail
"""
pass
@copydoc(Head)
def head(child, n=10):
return Head(child, n)
class Tail(_HeadOrTail):
""" Last `n` elements of collection
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, amount: int}')
>>> accounts.tail(5).dshape
dshape("5 * {name: string, amount: int32}")
See Also
--------
blaze.expr.collections.Head
"""
pass
@copydoc(Tail)
def tail(child, n=10):
return Tail(child, n)
def transform(t, replace=True, **kwargs):
""" Add named columns to table
>>> from blaze import symbol
>>> t = symbol('t', 'var * {x: int, y: int}')
>>> transform(t, z=t.x + t.y).fields
['x', 'y', 'z']
"""
if replace and set(t.fields).intersection(set(kwargs)):
t = t[[c for c in t.fields if c not in kwargs]]
args = [t] + [v.label(k) for k, v in sorted(kwargs.items(), key=first)]
return merge(*args)
def schema_concat(exprs):
""" Concatenate schemas together. Supporting both Records and Units
In the case of Units, the name is taken from expr.name
"""
names, values = [], []
for c in exprs:
schema = c.schema[0]
if isinstance(schema, Option):
schema = schema.ty
if isinstance(schema, Record):
names.extend(schema.names)
values.extend(schema.types)
elif isinstance(schema, Unit):
names.append(c._name)
values.append(schema)
else:
raise TypeError("All schemas must have Record or Unit shape."
"\nGot %s" % c.schema[0])
return dshape(Record(list(zip(names, values))))
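# Hedged illustration (added comments, not from the original module): given
# t = symbol('t', 'var * {x: int32}'), schema_concat([t.x, t.x.label('y')])
# would combine the two Unit schemas into dshape("{x: int32, y: int32}").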
class Merge(ElemWise):
""" Merge many fields together
Examples
--------
>>> from blaze import symbol
>>> accounts = symbol('accounts', 'var * {name: string, x: int, y: real}')
>>> merge(accounts.name, z=accounts.x + accounts.y).fields
['name', 'z']
"""
__slots__ = '_hash', '_child', 'children'
@property
def schema(self):
return schema_concat(self.children)
@property
def fields(self):
return list(tconcat(child.fields for child in self.children))
def _subterms(self):
yield self
for i in self.children:
for node in i._subterms():
yield node
def _get_field(self, key):
for child in self.children:
if key in child.fields:
if isscalar(child.dshape.measure):
return child
else:
return child[key]
def _project(self, key):
if not isinstance(key, (tuple, list)):
raise TypeError("Expected tuple or list, got %s" % key)
return merge(*[self[c] for c in key])
def _leaves(self):
return list(unique(tconcat(i._leaves() for i in self.children)))
@copydoc(Merge)
def merge(*exprs, **kwargs):
if len(exprs) + len(kwargs) == 1:
if exprs:
return exprs[0]
if kwargs:
[(k, v)] = kwargs.items()
return v.label(k)
# Get common sub expression
exprs += tuple(label(v, k) for k, v in sorted(kwargs.items(), key=first))
try:
child = common_subexpression(*exprs)
except Exception:
raise ValueError("No common subexpression found for input expressions")
result = Merge(child, exprs)
if not isdistinct(result.fields):
raise ValueError(
"Repeated columns found: " + ', '.join(
k for k, v in frequencies(result.fields).items() if v > 1
),
)
return result
def unpack(l):
""" Unpack items from collections of nelements 1
>>> unpack('hello')
'hello'
>>> unpack(['hello'])
'hello'
"""
if isinstance(l, (tuple, list, set)) and len(l) == 1:
return next(iter(l))
else:
return l
class Join(Expr):
""" Join two tables on common columns
Parameters
----------
lhs, rhs : Expr
Expressions to join
on_left : str, optional
The fields from the left side to join on.
If no ``on_right`` is passed, then these are the fields for both
sides.
on_right : str, optional
The fields from the right side to join on.
how : {'inner', 'outer', 'left', 'right'}
What type of join to perform.
suffixes: pair of str
The suffixes to be applied to the left and right sides
in order to resolve duplicate field names.
Examples
--------
>>> from blaze import symbol
>>> names = symbol('names', 'var * {name: string, id: int}')
>>> amounts = symbol('amounts', 'var * {amount: int, id: int}')
Join tables based on shared column name
>>> joined = join(names, amounts, 'id')
Join based on different column names
>>> amounts = symbol('amounts', 'var * {amount: int, acctNumber: int}')
>>> joined = join(names, amounts, 'id', 'acctNumber')
See Also
--------
blaze.expr.collections.Merge
"""
__slots__ = (
'_hash', 'lhs', 'rhs', '_on_left', '_on_right', 'how', 'suffixes',
)
__inputs__ = 'lhs', 'rhs'
@property
def on_left(self):
if isinstance(self._on_left, tuple):
return list(self._on_left)
else:
return self._on_left
@property
def on_right(self):
if isinstance(self._on_right, tuple):
return list(self._on_right)
else:
return self._on_right
@property
def schema(self):
"""
Examples
--------
>>> from blaze import symbol
>>> t = symbol('t', 'var * {name: string, amount: int}')
>>> s = symbol('t', 'var * {name: string, id: int}')
>>> join(t, s).schema
dshape("{name: string, amount: int32, id: int32}")
>>> join(t, s, how='left').schema
dshape("{name: string, amount: int32, id: ?int32}")
Overlapping but non-joined fields append _left, _right
>>> a = symbol('a', 'var * {x: int, y: int}')
>>> b = symbol('b', 'var * {x: int, y: int}')
>>> join(a, b, 'x').fields
['x', 'y_left', 'y_right']
"""
option = lambda dt: dt if isinstance(dt, Option) else Option(dt)
joined = [[name, dt] for name, dt in self.lhs.schema[0].parameters[0]
if name in self.on_left]
left = [[name, dt] for name, dt in
zip(self.lhs.fields, types_of_fields(
self.lhs.fields, self.lhs))
if name not in self.on_left]
right = [[name, dt] for name, dt in
zip(self.rhs.fields, types_of_fields(
self.rhs.fields, self.rhs))
if name not in self.on_right]
        # Handle overlapping but non-joined fields by suffixing them below
left_other = [name for name, dt in left if name not in self.on_left]
right_other = [name for name, dt in right if name not in self.on_right]
overlap = set.intersection(set(left_other), set(right_other))
left_suffix, right_suffix = self.suffixes
left = [[name + left_suffix if name in overlap else name, dt]
for name, dt in left]
right = [[name + right_suffix if name in overlap else name, dt]
for name, dt in right]
if self.how in ('right', 'outer'):
left = [[name, option(dt)] for name, dt in left]
if self.how in ('left', 'outer'):
right = [[name, option(dt)] for name, dt in right]
return dshape(Record(joined + left + right))
@property
def dshape(self):
# TODO: think if this can be generalized
return var * self.schema
def types_of_fields(fields, expr):
""" Get the types of fields in an expression
Examples
--------
>>> from blaze import symbol
>>> expr = symbol('e', 'var * {x: int64, y: float32}')
>>> types_of_fields('y', expr)
ctype("float32")
>>> types_of_fields(['y', 'x'], expr)
(ctype("float32"), ctype("int64"))
>>> types_of_fields('x', expr.x)
ctype("int64")
"""
if isinstance(expr.dshape.measure, Record):
return get(fields, expr.dshape.measure)
else:
if isinstance(fields, (tuple, list, set)):
assert len(fields) == 1
fields = fields[0]
assert fields == expr._name
return expr.dshape.measure
@copydoc(Join)
def join(lhs, rhs, on_left=None, on_right=None,
how='inner', suffixes=('_left', '_right')):
if not on_left and not on_right:
on_left = on_right = unpack(list(sorted(
set(lhs.fields) & set(rhs.fields),
key=lhs.fields.index)))
if not on_right:
on_right = on_left
if isinstance(on_left, tuple):
on_left = list(on_left)
if isinstance(on_right, tuple):
on_right = list(on_right)
if not on_left or not on_right:
raise ValueError("Can not Join. No shared columns between %s and %s" %
(lhs, rhs))
if types_of_fields(on_left, lhs) != types_of_fields(on_right, rhs):
raise TypeError("Schema's of joining columns do not match")
_on_left = tuple(on_left) if isinstance(on_left, list) else on_left
_on_right = (tuple(on_right) if isinstance(on_right, list)
else on_right)
how = how.lower()
if how not in ('inner', 'outer', 'left', 'right'):
raise ValueError("How parameter should be one of "
"\n\tinner, outer, left, right."
"\nGot: %s" % how)
return Join(lhs, rhs, _on_left, _on_right, how, suffixes)
class Concat(Expr):
""" Stack tables on common columns
Parameters
----------
lhs, rhs : Expr
Collections to concatenate
axis : int, optional
The axis to concatenate on.
Examples
--------
>>> from blaze import symbol
Vertically stack tables:
>>> names = symbol('names', '5 * {name: string, id: int32}')
>>> more_names = symbol('more_names', '7 * {name: string, id: int32}')
>>> stacked = concat(names, more_names)
>>> stacked.dshape
dshape("12 * {name: string, id: int32}")
Vertically stack matrices:
>>> mat_a = symbol('a', '3 * 5 * int32')
>>> mat_b = symbol('b', '3 * 5 * int32')
>>> vstacked = concat(mat_a, mat_b, axis=0)
>>> vstacked.dshape
dshape("6 * 5 * int32")
Horizontally stack matrices:
>>> hstacked = concat(mat_a, mat_b, axis=1)
>>> hstacked.dshape
dshape("3 * 10 * int32")
See Also
--------
blaze.expr.collections.Merge
"""
__slots__ = '_hash', 'lhs', 'rhs', 'axis'
__inputs__ = 'lhs', 'rhs'
@property
def dshape(self):
axis = self.axis
ldshape = self.lhs.dshape
lshape = ldshape.shape
return DataShape(
*(lshape[:axis] + (
_shape_add(lshape[axis], self.rhs.dshape.shape[axis]),
) + lshape[axis + 1:] + (ldshape.measure,))
)
def _shape_add(a, b):
if isinstance(a, Var) or isinstance(b, Var):
return var
return Fixed(a.val + b.val)
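# Added note: e.g. _shape_add(Fixed(3), Fixed(4)) gives Fixed(7), while any
# var operand (unknown length) makes the combined axis var as well.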
@copydoc(Concat)
def concat(lhs, rhs, axis=0):
ldshape = lhs.dshape
rdshape = rhs.dshape
if ldshape.measure != rdshape.measure:
raise TypeError(
'Mismatched measures: {l} != {r}'.format(
l=ldshape.measure, r=rdshape.measure
),
)
lshape = ldshape.shape
rshape = rdshape.shape
for n, (a, b) in enumerate(zip_longest(lshape, rshape, fillvalue=None)):
if n != axis and a != b:
raise TypeError(
'Shapes are not equal along axis {n}: {a} != {b}'.format(
n=n, a=a, b=b,
),
)
if axis < 0 or 0 < len(lshape) <= axis:
raise ValueError(
"Invalid axis '{a}', must be in range: [0, {n})".format(
a=axis, n=len(lshape)
),
)
return Concat(lhs, rhs, axis)
class IsIn(ElemWise):
"""Check if an expression contains values from a set.
Return a boolean expression indicating whether another expression
contains values that are members of a collection.
Parameters
----------
expr : Expr
Expression whose elements to check for membership in `keys`
keys : Sequence
Elements to test against. Blaze stores this as a ``frozenset``.
Examples
--------
Check if a vector contains any of 1, 2 or 3:
>>> from blaze import symbol
>>> t = symbol('t', '10 * int64')
>>> expr = t.isin([1, 2, 3])
>>> expr.dshape
dshape("10 * bool")
"""
__slots__ = '_hash', '_child', '_keys'
@property
def schema(self):
return datashape.bool_
def __str__(self):
return '%s.%s(%s)' % (self._child, type(self).__name__.lower(),
self._keys)
@copydoc(IsIn)
def isin(expr, keys):
if isinstance(keys, Expr):
raise TypeError('keys argument cannot be an expression, '
'it must be an iterable object such as a list, '
'tuple or set')
return IsIn(expr, frozenset(keys))
dshape_method_list.extend([
(iscollection, set([sort, head, tail])),
(lambda ds: len(ds.shape) == 1, set([distinct])),
(lambda ds: len(ds.shape) == 1 and isscalar(ds.measure), set([isin])),
])
| {
"repo_name": "scls19fr/blaze",
"path": "blaze/expr/collections.py",
"copies": "6",
"size": "18724",
"license": "bsd-3-clause",
"hash": -4491809594082047000,
"line_mean": 26.8630952381,
"line_max": 79,
"alpha_frac": 0.5464644307,
"autogenerated": false,
"ratio": 3.7018584420719653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7248322872771965,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
IMPORT_LIB = r'^import\s+([\w\.]+)'
FROM_IMPORT_LIB = r'^from\s+([\w\.]+)\s+import'
TRIPLE_DOUBLE = '"""'
TRIPLE_SINGLE = "'''"
def _is_hash_a_comment(s):
return ("'" not in s or s.index('#') < s.index("'")) and ('"' not in s or s.index('#') < s.index('"'))
def _get_doc_string_by_type(s, quote_type):
opposite_quote = {TRIPLE_DOUBLE: "'", TRIPLE_SINGLE: '"'}[quote_type]
if '#' in s and s.index('#') < s.index(quote_type) and _is_hash_a_comment(s):
return len(s), False
if opposite_quote in s and s.index(opposite_quote) < s.index(quote_type):
return s.index(opposite_quote, s.index(opposite_quote) + 1) + 1, False # fails on backslash '\''
return s.index(quote_type), True
def _get_part(s, base_index, quote):
points = []
index, in_quote = _get_doc_string_by_type(s, quote_type=quote)
if in_quote:
points.append((index + base_index, quote))
s = s[index + 3:]
base_index += index + 3
try:
points.append((s.index(quote) + 3 + base_index, quote))
base_index += s.index(quote) + 3
s = s[s.index(quote) + 3:]
except ValueError:
return "", base_index, points
else:
base_index += index
s = s[index:]
return s, base_index, points
def get_doc_string(s):
points = []
base_index = 0
while s:
double = s.find(TRIPLE_DOUBLE)
single = s.find(TRIPLE_SINGLE)
if double == single == -1:
break
elif (double < single or single == -1) and double != -1:
s, base_index, p2 = _get_part(s, base_index, TRIPLE_DOUBLE)
points += p2
elif double > single or double == -1:
s, base_index, p2 = _get_part(s, base_index, TRIPLE_SINGLE)
points += p2
else:
raise Exception('impossible')
return points
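# Hedged illustration (added example): each returned pair is (index, quote):
# the index of the first character of an opening triple quote, then the index
# just past the matching closing triple quote.
def _get_doc_string_example():
    points = get_doc_string('x = 1\n"""doc"""\n')
    assert points == [(6, '"""'), (15, '"""')]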
| {
"repo_name": "bootandy/imps",
"path": "imps/strings.py",
"copies": "1",
"size": "1951",
"license": "apache-2.0",
"hash": 6554600673643824000,
"line_mean": 27.6911764706,
"line_max": 106,
"alpha_frac": 0.5504869298,
"autogenerated": false,
"ratio": 3.156957928802589,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4207444858602589,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import re
import socket
import sys
from itertools import count
from functools import partial
from subprocess import Popen
from subprocess import PIPE
from time import sleep
import click
from distutils.spawn import find_executable
from compose.cli.docker_client import docker_client
from bag8.exceptions import CheckCallFailed, WaitLinkFailed
RE_WORD = re.compile(r'\W')
call = partial(Popen, stdout=PIPE, stderr=PIPE)
def check_call(args, exit=True, **kwargs):
proc = call(args, **kwargs)
out, err = proc.communicate()
if not proc.returncode:
return out, err, proc.returncode
if exit:
click.echo(out)
click.echo(err)
sys.exit(proc.returncode)
else:
raise CheckCallFailed(out + '\n' + err)
def exec_(args):
# byebye!
os.execv(find_executable(args[0]), args)
def wait_(host, port, max_retry=10, retry_interval=1):
counter = count()
    while next(counter) < max_retry:
try:
return socket.socket().connect((host, int(port)))
except socket.error:
click.echo('wait for {}:{}'.format(host, port))
sleep(retry_interval)
raise WaitLinkFailed("can't link to {}:{}".format(host, port))
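# Hedged usage sketch (added comment; host and port are illustrative):
#     wait_('localhost', 5432, max_retry=5)   # block until the port accepts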
def confirm(msg):
click.echo('')
click.echo(msg)
click.echo('proceed ?')
char = None
while char not in ['y', 'n']:
click.echo('Yes (y) or no (n) ?')
char = click.getchar()
# Yes
if char == 'y':
return True
def inspect(container, client=None):
client = client or docker_client()
return client.inspect_container(container)
def simple_name(text):
return RE_WORD.sub('', text)
def write_conf(path, content, bak_path=None):
    # keep a backup of the current file if requested
if bak_path:
call(['cp', path, bak_path])
cmd = [
'sudo',
'--reset-timestamp',
'tee',
path,
]
# confirm
if not confirm('`{0}` ?'.format(' '.join(cmd))):
return
process = call(cmd, stdin=PIPE)
process.stdin.write(content)
process.stdin.close()
exit_code = process.wait()
if exit_code != 0:
raise Exception('Failed to update {0}'.format(path))
| {
"repo_name": "novafloss/bag8",
"path": "bag8/utils.py",
"copies": "1",
"size": "2243",
"license": "mit",
"hash": -8531375075510445000,
"line_mean": 20.1603773585,
"line_max": 66,
"alpha_frac": 0.6192599198,
"autogenerated": false,
"ratio": 3.6003210272873196,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9717833917038403,
"avg_score": 0.00034940600978336826,
"num_lines": 106
} |
from __future__ import (absolute_import, division, print_function)
import sys
import warnings
try:
from setuptools import setup
except ImportError:
try:
from setuptools.core import setup
except ImportError:
from distutils.core import setup
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
SNAPSHOT = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
print(FULLVERSION)
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
if SNAPSHOT:
pipe = None
for cmd in ['git', 'git.cmd']:
            try:
                pipe = subprocess.Popen([cmd, "describe", "--always",
                                         "--match", "v[0-9/]*"],
                                        stdout=subprocess.PIPE)
                (so, serr) = pipe.communicate()
            except OSError:
                pass
if pipe is None or pipe.returncode != 0:
warnings.warn("WARNING: Couldn't get git revision, "
"using generic version string")
else:
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
# use result of git describe as version string
FULLVERSION = VERSION + '-' + rev.lstrip('v')
break
else:
FULLVERSION += QUALIFIER
setup(
name='xray-vision',
version=FULLVERSION,
author='Brookhaven National Lab',
packages=['xray_vision',
'xray_vision.qt_widgets',
'xray_vision.messenger', 'xray_vision.messenger.mpl',
'xray_vision.backend', 'xray_vision.backend.mpl',
'xray_vision.xrf', 'xray_vision.xrf.model',
'xray_vision.xrf.view',
'xray_vision.mask', 'xray_vision.utils'],
)
| {
"repo_name": "giltis/xray-vision",
"path": "setup.py",
"copies": "3",
"size": "2090",
"license": "bsd-3-clause",
"hash": -4556354849111795000,
"line_mean": 28.4366197183,
"line_max": 69,
"alpha_frac": 0.5263157895,
"autogenerated": false,
"ratio": 4.163346613545817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6189662403045817,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""
A module for spec utils
"""
# We need to be extra careful with python versions
# Ref : https://docs.python.org/2/library/modules.html?highlight=imports
# Ref : https://docs.python.org/3/library/modules.html?highlight=imports
import os
import sys
import imp
import warnings
from ._utils import _ImportError, _verbose_message
#from ._locks import _ModuleLockManager
try:
from importlib.machinery import ModuleSpec
except ImportError:
# Module spec, ported from python3
# to provide a py2/py3 API
class ModuleSpec:
"""The specification for a module, used for loading.
A module's spec is the source for information about the module. For
data associated with the module, including source, use the spec's
loader.
`name` is the absolute name of the module. `loader` is the loader
to use when loading the module. `parent` is the name of the
package the module is in. The parent is derived from the name.
`is_package` determines if the module is considered a package or
not. On modules this is reflected by the `__path__` attribute.
`origin` is the specific location used by the loader from which to
load the module, if that information is available. When filename is
set, origin will match.
`has_location` indicates that a spec's "origin" reflects a location.
When this is True, `__file__` attribute of the module is set.
`cached` is the location of the cached bytecode file, if any. It
corresponds to the `__cached__` attribute.
`submodule_search_locations` is the sequence of path entries to
search when importing submodules. If set, is_package should be
True--and False otherwise.
Packages are simply modules that (may) have submodules. If a spec
has a non-None value in `submodule_search_locations`, the import
system will consider modules loaded from the spec as packages.
Only finders (see importlib.abc.MetaPathFinder and
importlib.abc.PathEntryFinder) should modify ModuleSpec instances.
"""
def __init__(self, name, loader, origin=None, loader_state=None,
is_package=None):
self.name = name
self.loader = loader
self.origin = origin
self.loader_state = loader_state
self.submodule_search_locations = [] if is_package else None
# file-location attributes
self._set_fileattr = False
self._cached = None
def __repr__(self):
args = ['name={!r}'.format(self.name),
'loader={!r}'.format(self.loader)]
if self.origin is not None:
args.append('origin={!r}'.format(self.origin))
if self.submodule_search_locations is not None:
args.append('submodule_search_locations={}'
.format(self.submodule_search_locations))
return '{}({})'.format(self.__class__.__name__, ', '.join(args))
def __eq__(self, other):
smsl = self.submodule_search_locations
try:
return (self.name == other.name and
self.loader == other.loader and
self.origin == other.origin and
smsl == other.submodule_search_locations and
#self.cached == other.cached and
self.has_location == other.has_location)
except AttributeError:
return False
# @property
# def cached(self):
# if self._cached is None:
# if self.origin is not None and self._set_fileattr:
# if _bootstrap_external is None:
# raise NotImplementedError
# self._cached = _bootstrap_external._get_cached(self.origin)
# return self._cached
# @cached.setter
# def cached(self, cached):
# self._cached = cached
@property
def parent(self):
"""The name of the module's parent."""
if self.submodule_search_locations is None:
return self.name.rpartition('.')[0]
else:
return self.name
@property
def has_location(self):
return self._set_fileattr
@has_location.setter
def has_location(self, value):
self._set_fileattr = bool(value)
# def _module_repr_from_spec(spec):
# """Return the repr to use for the module."""
# # We mostly replicate _module_repr() using the spec attributes.
# name = '?' if spec.name is None else spec.name
# if spec.origin is None:
# if spec.loader is None:
# return '<module {!r}>'.format(name)
# else:
# return '<module {!r} ({!r})>'.format(name, spec.loader)
# else:
# if spec.has_location:
# return '<module {!r} from {!r}>'.format(name, spec.origin)
# else:
# return '<module {!r} ({})>'.format(spec.name, spec.origin)
try:
from importlib.machinery import module_from_spec
except ImportError:
def _new_module(name):
return type(sys)(name)
def _init_module_attrs(spec, module, override=False):
        # The passed-in module may not support attribute assignment,
# in which case we simply don't set the attributes.
# __name__
if (override or getattr(module, '__name__', None) is None):
try:
module.__name__ = spec.name
except AttributeError:
pass
# __loader__
if override or getattr(module, '__loader__', None) is None:
loader = spec.loader
# if loader is None:
# # A backward compatibility hack.
# if spec.submodule_search_locations is not None:
# loader = _NamespaceLoader.__new__(_NamespaceLoader)
# loader.path = spec.submodule_search_locations
try:
module.__loader__ = loader
except AttributeError:
pass
# __package__
if override or getattr(module, '__package__', None) is None:
try:
module.__package__ = spec.parent
except AttributeError:
pass
# __spec__
try:
module.__spec__ = spec
except AttributeError:
pass
# __path__
if override or getattr(module, '__path__', None) is None:
if spec.submodule_search_locations is not None:
try:
module.__path__ = spec.submodule_search_locations
except AttributeError:
pass
# __file__/__cached__
if spec.has_location:
if override or getattr(module, '__file__', None) is None:
try:
module.__file__ = spec.origin
except AttributeError:
pass
# No cache implemented in filefinder2 currently
# if override or getattr(module, '__cached__', None) is None:
# if spec.cached is not None:
# try:
# module.__cached__ = spec.cached
# except AttributeError:
# pass
return module
def module_from_spec(spec):
"""Create a module based on the provided spec."""
# Typically loaders will not implement create_module().
module = None
if hasattr(spec.loader, 'create_module'):
# If create_module() returns `None` then it means default
# module creation should be used.
module = spec.loader.create_module(spec)
elif hasattr(spec.loader, 'exec_module'):
warnings.warn('starting in Python 3.6, loaders defining exec_module() '
'must also define create_module()',
DeprecationWarning, stacklevel=2)
if module is None:
module = _new_module(spec.name)
_init_module_attrs(spec, module)
return module
# def _exec(spec, module):
# """Execute the spec in an existing module's namespace."""
# name = spec.name
# imp.acquire_lock()
# with _ModuleLockManager(name):
# if sys.modules.get(name) is not module:
# msg = 'module {!r} not in sys.modules'.format(name)
# raise _ImportError(msg, name=name)
# if spec.loader is None:
# if spec.submodule_search_locations is None:
# raise _ImportError('missing loader', name=spec.name)
# # namespace package
# _init_module_attrs(spec, module, override=True)
# return module
# _init_module_attrs(spec, module, override=True)
# if not hasattr(spec.loader, 'exec_module'):
# # (issue19713) Once BuiltinImporter and ExtensionFileLoader
# # have exec_module() implemented, we can add a deprecation
# # warning here.
# spec.loader.load_module(name)
# else:
# spec.loader.exec_module(module)
# return sys.modules[name]
# def _load_backward_compatible(spec):
# # (issue19713) Once BuiltinImporter and ExtensionFileLoader
# # have exec_module() implemented, we can add a deprecation
# # warning here.
# spec.loader.load_module(spec.name)
# # The module must be in sys.modules at this point!
# module = sys.modules[spec.name]
# if getattr(module, '__loader__', None) is None:
# try:
# module.__loader__ = spec.loader
# except AttributeError:
# pass
# if getattr(module, '__package__', None) is None:
# try:
# # Since module.__path__ may not line up with
# # spec.submodule_search_paths, we can't necessarily rely
# # on spec.parent here.
# module.__package__ = module.__name__
# if not hasattr(module, '__path__'):
# module.__package__ = spec.name.rpartition('.')[0]
# except AttributeError:
# pass
# if getattr(module, '__spec__', None) is None:
# try:
# module.__spec__ = spec
# except AttributeError:
# pass
# return module
#
#
# class _installed_safely:
#
# def __init__(self, module):
# self._module = module
# self._spec = module.__spec__
#
# def __enter__(self):
# # This must be done before putting the module in sys.modules
# # (otherwise an optimization shortcut in import.c becomes
# # wrong)
# self._spec._initializing = True
# sys.modules[self._spec.name] = self._module
#
# def __exit__(self, *args):
# try:
# spec = self._spec
# if any(arg is not None for arg in args):
# try:
# del sys.modules[spec.name]
# except KeyError:
# pass
# else:
# _verbose_message('import {!r} # {!r}', spec.name, spec.loader)
# finally:
# self._spec._initializing = False
#
# def _load_unlocked(spec):
# # A helper for direct use by the import system.
# if spec.loader is not None:
# # not a namespace package
# if not hasattr(spec.loader, 'exec_module'):
# return _load_backward_compatible(spec)
#
# module = module_from_spec(spec)
# with _installed_safely(module):
# if spec.loader is None:
# if spec.submodule_search_locations is None:
# raise _ImportError('missing loader', name=spec.name)
# # A namespace package so do nothing.
# else:
# spec.loader.exec_module(module)
#
# # We don't ensure that the import-related module attributes get
# # set in the sys.modules replacement case. Such modules are on
# # their own.
# return sys.modules[spec.name]
# # A method used during testing of _load_unlocked() and by
# # _load_module_shim().
# def _load(spec):
# """Return a new module object, loaded by the spec's loader.
# The module is not added to its parent.
# If a module is already in sys.modules, that existing module gets
# clobbered.
# """
# imp.acquire_lock()
# with _ModuleLockManager(spec.name):
# return _load_unlocked(spec)
| {
"repo_name": "asmodehn/filefinder2",
"path": "filefinder2/_module_utils.py",
"copies": "1",
"size": "12603",
"license": "mit",
"hash": -35201387478491412,
"line_mean": 36.9608433735,
"line_max": 83,
"alpha_frac": 0.5608981988,
"autogenerated": false,
"ratio": 4.222110552763819,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.004047603124494773,
"num_lines": 332
} |
from __future__ import absolute_import, division, print_function
"""
JVM Gateway
"""
import os
import sys
import subprocess
import time
import logging
import py4j
from threading import Thread
from py4j.java_gateway import JavaGateway, GatewayClient, java_import
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def launch_gateway():
cur_dir = os.path.dirname(__file__)
jar_dir = os.path.join(cur_dir,"java_libs")
jar_file = "seqreader-app-1.0-SNAPSHOT-jar-with-dependencies.jar"
jar_full = os.path.join(jar_dir,jar_file)
main_class = "com.continuumio.seqreaderapp.App"
port = int(os.getenv('NUTCHPY_GATEWAY_PORT',0))
cmd_dict = {"jar_full": jar_full, "main_class": main_class, 'port': port}
java_cmd = "/usr/bin/java -cp ::{jar_full} -Xms512m -Xmx512m {main_class} {port}".format(**cmd_dict)
ps = subprocess.Popen(java_cmd, shell=os.name != 'nt',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# wait for JVM to start
time.sleep(1)
logger.debug(java_cmd)
port = int(ps.stdout.readline())
gateway = JavaGateway(GatewayClient(port=port),auto_convert=True)
logger.info("JAVA GATEWAY STARTED ON PORT: %d"% (port,) )
java_import(gateway.jvm, 'com.continuumio.seqreaderapp.LinkReader')
java_import(gateway.jvm, 'com.continuumio.seqreaderapp.NodeReader')
java_import(gateway.jvm, 'com.continuumio.seqreaderapp.SequenceReader')
    ## STOLEN SHAMELESSLY FROM APACHE/SPARK
# Create a thread to echo output from the GatewayServer, which is required
# for Java log output to show up:
class EchoOutputThread(Thread):
def __init__(self, stream):
Thread.__init__(self)
self.daemon = True
self.stream = stream
def run(self):
while True:
line = self.stream.readline()
sys.stderr.write(line)
EchoOutputThread(ps.stdout).start()
return gateway
class Singleton:
def __init__(self,klass):
self.klass = klass
self.instance = None
def __call__(self,*args,**kwds):
        if self.instance is None:
self.instance = self.klass(*args,**kwds)
return self.instance
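# Added note: because of the decorator below, NutchJavaGateway is replaced by
# a Singleton wrapper, so NutchJavaGateway() always returns the same shared
# instance; at most one JVM gateway is therefore launched per process.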
@Singleton
class NutchJavaGateway:
_gateway = None
@property
def gateway(self):
if self._gateway is None:
self._gateway = launch_gateway()
return self._gateway
def __del__(self):
if self._gateway:
logger.info("SHUTTING DOWN JAVA GATEWAY ")
self._gateway.shutdown(raise_exception=True)
logger.debug("SHUTDOWN COMPLETE")
gateway = NutchJavaGateway().gateway
| {
"repo_name": "ContinuumIO/nutchpy",
"path": "nutchpy/JVM.py",
"copies": "3",
"size": "2694",
"license": "apache-2.0",
"hash": -4228357179053467600,
"line_mean": 27.0625,
"line_max": 104,
"alpha_frac": 0.6525612472,
"autogenerated": false,
"ratio": 3.4941634241245136,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008555621482763792,
"num_lines": 96
} |
from __future__ import absolute_import, division, print_function
"""
Say 'yes' to types but 'no' to typing!
"""
__version__ = "0.2.0dev"
__author__ = "Hynek Schlawack"
__license__ = "MIT"
__copyright__ = "Copyright 2014 Hynek Schlawack"
def with_cmp(attrs):
"""
A class decorator that adds comparison methods based on *attrs*.
Two instances are compared as if the respective values of *attrs* were
tuples.
:param attrs: Attributes to work with.
:type attrs: `list` of native strings
"""
def attrs_to_tuple(obj):
"""
Create a tuple of all values of *obj*'s *attrs*.
"""
return tuple(getattr(obj, a) for a in attrs)
def eq(self, other):
if isinstance(other, self.__class__):
return attrs_to_tuple(self) == attrs_to_tuple(other)
else:
return NotImplemented
def ne(self, other):
result = eq(self, other)
if result is NotImplemented:
return NotImplemented
else:
return not result
def lt(self, other):
if isinstance(other, self.__class__):
return attrs_to_tuple(self) < attrs_to_tuple(other)
else:
return NotImplemented
def le(self, other):
if isinstance(other, self.__class__):
return attrs_to_tuple(self) <= attrs_to_tuple(other)
else:
return NotImplemented
def gt(self, other):
if isinstance(other, self.__class__):
return attrs_to_tuple(self) > attrs_to_tuple(other)
else:
return NotImplemented
def ge(self, other):
if isinstance(other, self.__class__):
return attrs_to_tuple(self) >= attrs_to_tuple(other)
else:
return NotImplemented
def hash_(self):
return hash(attrs_to_tuple(self))
def wrap(cl):
cl.__eq__ = eq
cl.__ne__ = ne
cl.__lt__ = lt
cl.__le__ = le
cl.__gt__ = gt
cl.__ge__ = ge
cl.__hash__ = hash_
return cl
return wrap
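# --- Example (editor's addition) ---
# A small self-check of the tuple-like comparison semantics documented above;
# the class and attribute names are illustrative only.
def _with_cmp_example():
    @with_cmp(["a", "b"])
    class Point(object):
        def __init__(self, a, b):
            self.a = a
            self.b = b
    assert Point(1, 2) == Point(1, 2)
    assert Point(1, 2) < Point(1, 3)  # compared like (1, 2) < (1, 3)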
def with_repr(attrs):
"""
A class decorator that adds a human-friendly ``__repr__`` method that
returns a sensible representation based on *attrs*.
:param attrs: Attributes to work with.
:type attrs: Iterable of native strings.
"""
def repr_(self):
return "<{0}({1})>".format(
self.__class__.__name__,
", ".join(a + "=" + repr(getattr(self, a)) for a in attrs)
)
def wrap(cl):
cl.__repr__ = repr_
return cl
return wrap
def with_init(attrs, defaults=None):
"""
A class decorator that wraps the __init__ method of a class and sets
*attrs* first using keyword arguments.
:param attrs: Attributes to work with.
:type attrs: Iterable of native strings.
:param defaults: Default values if attributes are omitted on instantiation.
:type defaults: `dict` or `None`
"""
if defaults is None:
defaults = {}
def init(self, *args, **kw):
for a in attrs:
try:
v = kw.pop(a)
except KeyError:
try:
v = defaults[a]
except KeyError:
raise ValueError("Missing value for '{0}'.".format(a))
setattr(self, a, v)
self.__original_init__(*args, **kw)
def wrap(cl):
cl.__original_init__ = cl.__init__
cl.__init__ = init
return cl
return wrap
def attributes(attrs, defaults=None, create_init=True):
"""
A convenience class decorator that combines :func:`with_cmp`,
:func:`with_repr`, and optionally :func:`with_init` to avoid code
duplication.
:param attrs: Attributes to work with.
:type attrs: Iterable of native strings.
:param defaults: Default values if attributes are omitted on instantiation.
:type defaults: `dict` or `None`
:param create_init: Also apply :func:`with_init` (default: `True`)
:type create_init: `bool`
"""
def wrap(cl):
cl = with_cmp(attrs)(with_repr(attrs)(cl))
if create_init is True:
return with_init(attrs, defaults=defaults)(cl)
else:
return cl
return wrap
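# --- Example (editor's addition) ---
# The three decorators combined via ``attributes``; ``defaults`` fills in
# keyword arguments omitted at instantiation time. Names are illustrative.
def _attributes_example():
    @attributes(["host", "port"], defaults={"port": 80})
    class Endpoint(object):
        pass
    e = Endpoint(host="example.org")
    assert e.port == 80
    assert e == Endpoint(host="example.org", port=80)
    assert repr(e) == "<Endpoint(host='example.org', port=80)>"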
| {
"repo_name": "alex/characteristic",
"path": "characteristic.py",
"copies": "1",
"size": "4254",
"license": "mit",
"hash": 1549121538953368800,
"line_mean": 25.7547169811,
"line_max": 79,
"alpha_frac": 0.5632346027,
"autogenerated": false,
"ratio": 4.036053130929791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5099287733629791,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""
Test for lib_module
Reference: http://pythontesting.net/framework/nose/nose-introduction/
"""
import unittest
try:
# Python >= 3.3
import unittest.mock as mock
except ImportError:
    import mock
import pytest
try:
from io import StringIO
except ImportError: # py27
from StringIO import StringIO
import requests
# If we are calling pytest at a different hierarchical level than ros1_pip_pytemplate,
# we first need to install the module before testing,
# which is good practice anyway, to make sure the install process also works.
#
# This works out of the box with catkin build.
# When running from pure python, use pip in a virtual environment!
# https://packaging.python.org/tutorials/installing-packages/#creating-virtual-environments
# If access to ROS packages is needed (via pyros-setup), don't forget to enable site-packages.
# Here we are testing the core python code, importing only that module
from .. import Httpbin
# Basic UnitTest TestCase
class TestHttpbin(unittest.TestCase):
# fixture
def setUp(self):
self.httpbin = Httpbin('http://httpbin.org')
# The parent package has already been imported and loggers have been created.
    # Here we patch the existing logger to confirm the message is being logged.
    # Note there is also pytest-catchlog, which can set up a pytest fixture for this...
@mock.patch('ros1_pip_pytemplate.lib_module._logger')
def test_retrieve(self, mock_logger):
resp = self.httpbin.get(params={"answer": "42"})
mock_logger.info.assert_called_once_with("Sending GET request to http://httpbin.org/get...")
status_code = resp.status_code
json_data = resp.json()
assert status_code == requests.status_codes.codes.OK
assert json_data.get('origin') is not None
assert json_data.get('url') is not None
assert json_data.get('args') == {'answer': '42'}
# In case we run this by itself, outside of a testing framework like pytest
if __name__ == '__main__':
pytest.main(['-s', '-x', __file__])
| {
"repo_name": "pyros-dev/ros1_template",
"path": "ros1_pip_pytemplate/ros1_pip_pytemplate/tests/test_lib_module_embedded.py",
"copies": "1",
"size": "2115",
"license": "mit",
"hash": -6031896855211849000,
"line_mean": 33.1129032258,
"line_max": 100,
"alpha_frac": 0.7073286052,
"autogenerated": false,
"ratio": 3.9022140221402215,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5109542627340221,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""
This is for 3D selection in Glue 3d scatter plot viewer.
"""
import math
import numpy as np
try:
from sklearn.neighbors import NearestNeighbors
SKLEARN_INSTALLED = True
except ImportError:
SKLEARN_INSTALLED = False
from glue.core import Subset
from glue.config import viewer_tool
from ..common.selection_tools import VispyMouseMode, get_map_data_scatter
from .layer_artist import ScatterLayerArtist
# TODO: replace by knn selection mode
@viewer_tool
class PointSelectionMode(VispyMouseMode):
icon = 'glue_point'
tool_id = 'scatter3d:point'
action_text = 'Select points using a point selection'
def release(self, event):
pass
def press(self, event):
if event.button == 1:
self.selection_origin = event.pos
# Get the values of the currently active layer artist - we
# specifically pick the layer artist that is selected in the layer
# artist view in the left since we have to pick one.
layer_artist = self.viewer._view.layer_list.current_artist()
# If the layer artist is for a Subset not Data, pick the first Data
# one instead (where the layer artist is a scatter artist)
if isinstance(layer_artist.layer, Subset):
for layer_artist in self.iter_data_layer_artists():
if isinstance(layer_artist, ScatterLayerArtist):
break
else:
return
# TODO: figure out how to make the above choice more sensible. How
# does the user know which data layer will be used? Can we use
# all of them in this mode?
self.active_layer_artist = layer_artist
# Ray intersection on the CPU to highlight the selected point(s)
data = get_map_data_scatter(self.active_layer_artist.layer,
self.active_layer_artist.visual,
self._vispy_widget)
# TODO: the threshold 2 here could replaced with a slider bar to
# control the selection region in the future
m1 = data > (event.pos - 2)
m2 = data < (event.pos + 2)
array_mark = np.argwhere(m1[:, 0] & m1[:, 1] & m2[:, 0] & m2[:, 1])
mask = np.zeros(len(data), dtype=bool)
for i in array_mark:
index = int(i[0])
mask[index] = True
self.mark_selected(mask, self.active_layer_artist.layer)
def move(self, event):
# add the knn scheme to decide selected region when moving mouse
if SKLEARN_INSTALLED:
if event.button == 1 and event.is_dragging:
# TODO: support multiple datasets here
data = get_map_data_scatter(self.active_layer_artist.layer,
self.active_layer_artist.visual,
self._vispy_widget)
# calculate the threshold and call draw visual
width = event.pos[0] - self.selection_origin[0]
height = event.pos[1] - self.selection_origin[1]
drag_distance = math.sqrt(width**2 + height**2)
canvas_diag = math.sqrt(self._vispy_widget.canvas.size[0]**2 +
self._vispy_widget.canvas.size[1]**2)
                mask = np.zeros(self.active_layer_artist.layer.shape, dtype=bool)
                # number of neighbors is proportional to the mouse drag distance
                n_neighbors = int(drag_distance / canvas_diag *
                                  self.active_layer_artist.layer.shape[0])
                if n_neighbors >= 1:
                    # NearestNeighbors requires an integer n_neighbors
                    neigh = NearestNeighbors(n_neighbors=n_neighbors)
                    neigh.fit(data)
                    select_index = neigh.kneighbors([self.selection_origin])[1]
                    mask[select_index] = True
self.mark_selected(mask, self.active_layer_artist.layer)
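# --- Editor's sketch (assumes scikit-learn is available) ---
# Standalone illustration of the selection rule used in ``move`` above: the
# neighbour count grows with the drag distance relative to the canvas
# diagonal. All data and sizes here are made up for demonstration.
def _knn_selection_sketch():
    from sklearn.neighbors import NearestNeighbors
    data = np.random.rand(100, 2)            # projected 2-d positions
    origin = np.array([0.5, 0.5])            # where the drag started
    drag_distance, canvas_diag = 50.0, 1000.0
    n_neighbors = int(drag_distance / canvas_diag * len(data))  # -> 5
    mask = np.zeros(len(data), dtype=bool)
    if n_neighbors >= 1:
        neigh = NearestNeighbors(n_neighbors=n_neighbors).fit(data)
        select_index = neigh.kneighbors([origin])[1]
        mask[select_index.ravel()] = True
    return mask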
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/scatter/scatter_toolbar.py",
"copies": "1",
"size": "4077",
"license": "bsd-2-clause",
"hash": -1692397125214109400,
"line_mean": 37.1028037383,
"line_max": 99,
"alpha_frac": 0.5744419917,
"autogenerated": false,
"ratio": 4.273584905660377,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002088381216107073,
"num_lines": 107
} |
from __future__ import absolute_import, division, print_function
"""
This is for 3D selection in Glue 3d scatter plot viewer.
"""
import numpy as np
from glue.config import viewer_tool
from glue.core.roi import Roi, Projected3dROI
from ..common.selection_tools import VispyMouseMode
class NearestNeighborROI(Roi):
def __init__(self, x=None, y=None, max_radius=None):
self.x = x
self.y = y
self.max_radius = max_radius
def contains(self, x, y):
mask = np.zeros(x.shape, bool)
d = np.hypot(x - self.x, y - self.y)
index = np.argmin(d)
if d[index] < self.max_radius:
mask[index] = True
return mask
def move_to(self, x, y):
self.x = x
self.y = y
def defined(self):
try:
return np.isfinite([self.x, self.y]).all()
except TypeError:
return False
def center(self):
return self.x, self.y
def reset(self):
self.x = self.y = self.max_radius = None
def __gluestate__(self, context):
return dict(x=float(self.x), y=float(self.y),
max_radius=float(self.max_radius))
@classmethod
def __setgluestate__(cls, rec, context):
return cls(rec['x'], rec['y'], rec['max_radius'])
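# --- Editor's sketch ---
# ``contains`` flags at most one point: the nearest one, and only if it lies
# within ``max_radius``. Sample values below are made up for illustration.
def _nearest_neighbor_roi_example():
    roi = NearestNeighborROI(x=0.0, y=0.0, max_radius=1.0)
    x = np.array([0.2, 5.0, -0.5])
    y = np.array([0.1, 5.0, -0.5])
    mask = roi.contains(x, y)
    assert mask.tolist() == [True, False, False]  # only the closest point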
@viewer_tool
class PointSelectionMode(VispyMouseMode):
icon = 'glue_point'
tool_id = 'scatter3d:point'
action_text = 'Select points using a point selection'
def press(self, event):
if event.button == 1:
roi = Projected3dROI(roi_2d=NearestNeighborROI(event.pos[0], event.pos[1],
max_radius=5),
projection_matrix=self.projection_matrix)
self.apply_roi(roi)
def release(self, event):
pass
def move(self, event):
pass
| {
"repo_name": "astrofrog/glue-vispy-viewers",
"path": "glue_vispy_viewers/scatter/scatter_toolbar.py",
"copies": "2",
"size": "1875",
"license": "bsd-2-clause",
"hash": 922404011067260900,
"line_mean": 24.6849315068,
"line_max": 86,
"alpha_frac": 0.568,
"autogenerated": false,
"ratio": 3.498134328358209,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001574555188159345,
"num_lines": 73
} |
from __future__ import absolute_import, division, print_function
"""
This is for 3D selection in Glue 3d scatter plot viewer.
"""
import numpy as np
from glue.core import Data
from glue.config import viewer_tool
from glue.core.roi import RectangularROI, CircularROI
from glue.viewers.common.qt.tool import CheckableTool
from glue.core.subset import SubsetState, ElementSubsetState
from glue.core.exceptions import IncompatibleAttribute
from glue.core.edit_subset_mode import EditSubsetMode
from glue.utils.geometry import points_inside_poly
from ..utils import as_matrix_transform
from ..extern.vispy.scene import Rectangle, Line, Ellipse
class VispyMouseMode(CheckableTool):
# this will create an abstract selection mode class to handle mouse events
# instanced by lasso, rectangle, circular and point mode
def __init__(self, viewer):
super(VispyMouseMode, self).__init__(viewer)
self._vispy_widget = viewer._vispy_widget
self.current_visible_array = None
def get_visible_data(self):
visible = []
# Loop over visible layer artists
for layer_artist in self.viewer._layer_artist_container:
# Only extract Data objects, not subsets
if isinstance(layer_artist.layer, Data):
visible.append(layer_artist.layer)
visual = layer_artist.visual # we only have one visual for each canvas
return visible, visual
def iter_data_layer_artists(self):
for layer_artist in self.viewer._layer_artist_container:
if isinstance(layer_artist.layer, Data):
yield layer_artist
def mark_selected(self, mask, data):
# We now make a subset state. For scatter plots we'll want to use an
# ElementSubsetState, while for cubes, we'll need to change to a
# MaskSubsetState.
subset_state = ElementSubsetState(indices=np.where(mask)[0], data=data)
# We now check what the selection mode is, and update the selection as
# needed (this is delegated to the correct subset mode).
mode = EditSubsetMode()
mode.update(self.viewer._data, subset_state, focus_data=data)
def mark_selected_dict(self, indices_dict):
subset_state = MultiElementSubsetState(indices_dict=indices_dict)
mode = EditSubsetMode()
if len(indices_dict) > 0:
mode.update(self.viewer._data, subset_state, focus_data=list(indices_dict)[0])
class MultiElementSubsetState(SubsetState):
def __init__(self, indices_dict=None):
super(MultiElementSubsetState, self).__init__()
indices_dict_uuid = {}
for key in indices_dict:
if isinstance(key, Data):
indices_dict_uuid[key.uuid] = indices_dict[key]
else:
indices_dict_uuid[key] = indices_dict[key]
self._indices_dict = indices_dict_uuid
def to_mask(self, data, view=None):
if data.uuid in self._indices_dict:
indices = self._indices_dict[data.uuid]
result = np.zeros(data.shape, dtype=bool)
result.flat[indices] = True
if view is not None:
result = result[view]
return result
else:
raise IncompatibleAttribute()
def copy(self):
state = MultiElementSubsetState(indices_dict=self._indices_dict)
return state
def __gluestate__(self, context):
serialized = {key: context.do(value) for key, value in self._indices_dict.items()}
return {'indices_dict': serialized}
@classmethod
def __setgluestate__(cls, rec, context):
unserialized = {key: context.object(value) for key, value in rec['indices_dict'].items()}
state = cls(indices_dict=unserialized)
return state
def get_map_data_scatter(data, visual, vispy_widget):
# Get the component IDs
x_att = vispy_widget.viewer_state.x_att
y_att = vispy_widget.viewer_state.y_att
z_att = vispy_widget.viewer_state.z_att
# Get the visible data
layer_data = np.nan_to_num([data[x_att],
data[y_att],
data[z_att]]).transpose()
tr = as_matrix_transform(visual.get_transform(map_from='visual', map_to='canvas'))
data = tr.map(layer_data)
data /= data[:, 3:] # normalize with homogeneous coordinates
return data[:, :2]
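# --- Editor's note (illustrative) ---
# The divide above is the usual homogeneous (perspective) normalisation: a
# projected point (x, y, z, w) maps to canvas coordinates (x/w, y/w).
def _homogeneous_divide_example():
    p = np.array([[20.0, 10.0, 4.0, 2.0]])  # one made-up point with w = 2
    p = p / p[:, 3:]                         # -> [[10., 5., 2., 1.]]
    return p[:, :2]                          # canvas position (10., 5.)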
def get_map_data_volume(data, visual):
"""
Get the mapped buffer from self.visual to canvas.
:return: Mapped data position on canvas.
"""
tr = as_matrix_transform(visual.get_transform(map_from='visual',
map_to='canvas'))
pos_data = np.indices(data.data.shape[::-1], dtype=float)
pos_data = pos_data.reshape(3, -1).transpose()
data = tr.map(pos_data)
data /= data[:, 3:] # normalize with homogeneous coordinates
return data[:, :2]
def get_map_data(layer_artist, viewer):
from ..scatter.layer_artist import ScatterLayerArtist
from ..volume.layer_artist import VolumeLayerArtist
if isinstance(layer_artist, ScatterLayerArtist):
return get_map_data_scatter(layer_artist.layer, layer_artist.visual, viewer._vispy_widget)
elif isinstance(layer_artist, VolumeLayerArtist):
return get_map_data_volume(layer_artist.layer, layer_artist.visual)
else:
raise Exception("Unknown layer type: {0}".format(type(layer_artist)))
@viewer_tool
class LassoSelectionMode(VispyMouseMode):
icon = 'glue_lasso'
tool_id = 'vispy:lasso'
action_text = 'Select data using a lasso selection'
def __init__(self, viewer):
super(LassoSelectionMode, self).__init__(viewer)
self.line = Line(color='purple',
width=2, method='agg',
parent=self._vispy_widget.canvas.scene)
def activate(self):
self.reset()
def reset(self):
self.line_pos = []
self.line.set_data(np.zeros((0, 2), dtype=float))
self.line.parent = None
def press(self, event):
if event.button == 1:
self.line_pos.append(event.pos)
def move(self, event):
if event.button == 1 and event.is_dragging:
self.line_pos.append(event.pos)
self.line.set_data(np.array(self.line_pos, dtype=float))
self.line.parent = self._vispy_widget.canvas.scene
def release(self, event):
if event.button == 1:
if len(self.line_pos) > 0:
indices_dict = {}
for layer_artist in self.iter_data_layer_artists():
data = get_map_data(layer_artist, self.viewer)
vx, vy = np.array(self.line_pos).transpose()
x, y = data.transpose()
mask = points_inside_poly(x, y, vx, vy)
shape_mask = np.reshape(mask, layer_artist.layer.shape[::-1])
shape_mask = np.ravel(np.transpose(shape_mask))
indices_dict[layer_artist.layer] = np.where(shape_mask)[0]
self.mark_selected_dict(indices_dict)
self.reset()
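# --- Editor's sketch ---
# The lasso test above reduces to a point-in-polygon query on projected 2-d
# coordinates; the same call on toy data (a unit-square "lasso"):
def _lasso_mask_example():
    vx = np.array([0.0, 1.0, 1.0, 0.0])  # lasso vertices, x
    vy = np.array([0.0, 0.0, 1.0, 1.0])  # lasso vertices, y
    x = np.array([0.5, 2.0])
    y = np.array([0.5, 2.0])
    return points_inside_poly(x, y, vx, vy)  # -> [True, False]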
@viewer_tool
class RectangleSelectionMode(VispyMouseMode):
icon = 'glue_square'
tool_id = 'vispy:rectangle'
action_text = 'Select data using a rectangular selection'
def __init__(self, viewer):
super(RectangleSelectionMode, self).__init__(viewer)
self.rectangle = Rectangle(center=(0, 0), width=1, height=1, border_width=2,
color=(0, 0, 0, 0), border_color='purple')
def activate(self):
self.reset()
def reset(self):
self.corner1 = None
self.corner2 = None
self.rectangle.parent = None
def press(self, event):
if event.button == 1:
self.corner1 = event.pos
def move(self, event):
if event.button == 1 and event.is_dragging:
self.corner2 = event.pos
x1, y1 = self.corner1
x2, y2 = self.corner2
if abs(x2 - x1) > 0 and abs(y2 - y1) > 0:
self.rectangle.center = 0.5 * (x1 + x2), 0.5 * (y1 + y2)
self.rectangle.width = abs(x2 - x1)
self.rectangle.height = abs(y2 - y1)
self.rectangle.parent = self._vispy_widget.canvas.scene
@property
def bounds(self):
x1, y1 = self.corner1
x2, y2 = self.corner2
return (min(x1, x2), max(x1, x2), min(y1, y2), max(y1, y2))
def release(self, event):
if event.button == 1:
if self.corner2 is not None:
r = RectangularROI(*self.bounds)
indices_dict = {}
for layer_artist in self.iter_data_layer_artists():
data = get_map_data(layer_artist, self.viewer)
mask = r.contains(data[:, 0], data[:, 1])
shape_mask = np.reshape(mask, layer_artist.layer.shape[::-1])
shape_mask = np.ravel(np.transpose(shape_mask))
indices_dict[layer_artist.layer] = np.where(shape_mask)[0]
self.mark_selected_dict(indices_dict)
self.reset()
@viewer_tool
class CircleSelectionMode(VispyMouseMode):
icon = 'glue_circle'
tool_id = 'vispy:circle'
action_text = 'Select data using a circular selection'
def __init__(self, viewer):
super(CircleSelectionMode, self).__init__(viewer)
self.ellipse = Ellipse(center=(0, 0), radius=1, border_width=2,
color=(0, 0, 0, 0), border_color='purple')
def activate(self):
self.reset()
def reset(self):
self.center = None
self.radius = 0
self.ellipse.parent = None
def press(self, event):
if event.button == 1:
self.center = event.pos
def move(self, event):
if event.button == 1 and event.is_dragging:
self.radius = max(abs(event.pos[0] - self.center[0]),
abs(event.pos[1] - self.center[1]))
if self.radius > 0:
self.ellipse.center = self.center
self.ellipse.radius = self.radius
self.ellipse.parent = self._vispy_widget.canvas.scene
def release(self, event):
if event.button == 1:
if self.radius > 0:
c = CircularROI(self.center[0], self.center[1], self.radius)
indices_dict = {}
for layer_artist in self.iter_data_layer_artists():
data = get_map_data(layer_artist, self.viewer)
mask = c.contains(data[:, 0], data[:, 1])
shape_mask = np.reshape(mask, layer_artist.layer.shape[::-1])
shape_mask = np.ravel(np.transpose(shape_mask))
indices_dict[layer_artist.layer] = np.where(shape_mask)[0]
self.mark_selected_dict(indices_dict)
self.reset()
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/common/selection_tools.py",
"copies": "1",
"size": "10992",
"license": "bsd-2-clause",
"hash": -110542073372431600,
"line_mean": 32.4103343465,
"line_max": 98,
"alpha_frac": 0.5932496361,
"autogenerated": false,
"ratio": 3.7786180818150568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.987031562918677,
"avg_score": 0.00031041774565732474,
"num_lines": 329
} |
from __future__ import absolute_import, division, print_function
"""
This is for 3D selection in the Glue 3D volume rendering viewer, with shape selection and
advanced selection (the latter not yet available).
"""
import math
import numpy as np
from glue.core import Subset
from glue.config import viewer_tool
from ..common.toolbar import VispyMouseMode
from ..extern.vispy.scene import Markers
from .layer_artist import VolumeLayerArtist
from .floodfill_scipy import floodfill_scipy
# TODO: replace by dendrogram and floodfill mode
@viewer_tool
class PointSelectionMode(VispyMouseMode):
icon = 'glue_point'
tool_id = 'volume3d:point'
action_text = 'Select volume using a point selection'
def __init__(self, viewer):
super(PointSelectionMode, self).__init__(viewer)
self.markers = Markers(parent=self._vispy_widget.view.scene)
self.max_value_pos = None
self.max_value = None
def release(self, event):
pass
def deactivate(self):
self.markers.visible = False
def press(self, event):
"""
Assign mouse position and do point selection.
:param event:
"""
if event.button == 1:
self.selection_origin = event.pos
self.visible_data, self.visual = self.get_visible_data()
# Get the values of the currently active layer artist - we
# specifically pick the layer artist that is selected in the layer
# artist view in the left since we have to pick one.
layer_artist = self.viewer._view.layer_list.current_artist()
# If the layer artist is for a Subset not Data, pick the first Data
# one instead (where the layer artist is a volume artist)
if isinstance(layer_artist.layer, Subset):
for layer_artist in self.iter_data_layer_artists():
if isinstance(layer_artist, VolumeLayerArtist):
break
else:
return
# TODO: figure out how to make the above choice more sensible. How
# does the user know which data layer will be used? Can we use
# all of them in this mode?
values = layer_artist.layer[layer_artist.state.attribute]
self.active_layer_artist = layer_artist
self.current_visible_array = np.nan_to_num(values).astype(float)
# get start and end point of ray line
pos = self.get_ray_line()
max_value_pos, max_value = self.get_inter_value(pos)
self.max_value_pos = max_value_pos
self.max_value = max_value
# set marker and status text
if max_value:
self.markers.set_data(pos=np.array(max_value_pos),
face_color='yellow')
self.markers.visible = True
self._vispy_widget.canvas.update()
def move(self, event):
if event.button == 1 and event.is_dragging:
visible_data, visual = self.get_visible_data()
# calculate the threshold and call draw visual
width = event.pos[0] - self.selection_origin[0]
height = event.pos[1] - self.selection_origin[1]
drag_distance = math.sqrt(width**2 + height**2)
canvas_diag = math.sqrt(self._vispy_widget.canvas.size[0]**2 +
self._vispy_widget.canvas.size[1]**2)
mask = self.draw_floodfill_visual(drag_distance / canvas_diag)
if mask is not None:
# Smart selection mask has the same shape as data shape.
smart_mask = np.reshape(mask, self.current_visible_array.shape)
smart_mask = np.ravel(smart_mask)
self.mark_selected(smart_mask, self.active_layer_artist.layer)
def draw_floodfill_visual(self, threshold):
        formatted_data = np.asarray(self.current_visible_array, np.float64)
# Normalize the threshold so that it returns values in the range 1.01
# to 101 (since it can currently be between 0 and 1)
threshold = 1 + 10 ** (threshold * 4 - 2)
tr_visual = self._vispy_widget.limit_transforms[self.visual]
trans = tr_visual.translate
scale = tr_visual.scale
        # nothing was picked on press, so there is nothing to flood-fill
        if not self.max_value_pos:
            return None
        max_value_pos = self.max_value_pos[0]
        # xyz index in volume array
        x = int(round((max_value_pos[0] - trans[0]) / scale[0]))
        y = int(round((max_value_pos[1] - trans[1]) / scale[1]))
        z = int(round((max_value_pos[2] - trans[2]) / scale[2]))
        select_mask = floodfill_scipy(formatted_data, (z, y, x), threshold)
        status_text = ('x=%.2f, y=%.2f, z=%.2f' % (x, y, z) +
                       ' value=%.2f' % self.max_value)
        self.viewer.show_status(status_text)
        return select_mask
def get_inter_value(self, pos):
tr_visual = self._vispy_widget.limit_transforms[self.visual]
trans = tr_visual.translate
scale = tr_visual.scale
inter_pos = []
for z in range(0, self.current_visible_array.shape[0]):
            # 3D line through two points P0 = (x0, y0, z0) and P1 = (x1, y1, z1):
            # (x - x0)/(x1 - x0) = (y - y0)/(y1 - y0) = (z - z0)/(z1 - z0) = t
z = z * scale[2] + trans[2]
t = (z - pos[0][2]) / (pos[1][2] - pos[0][2])
x = t * (pos[1][0] - pos[0][0]) + pos[0][0]
y = t * (pos[1][1] - pos[0][1]) + pos[0][1]
inter_pos.append([x, y, z])
inter_pos = np.array(inter_pos)
# cut the line within the cube
m1 = inter_pos[:, 0] > trans[0] # for x
m2 = inter_pos[:, 0] < (self.current_visible_array.shape[2] * scale[0] + trans[0])
m3 = inter_pos[:, 1] > trans[1] # for y
m4 = inter_pos[:, 1] < (self.current_visible_array.shape[1] * scale[1] + trans[1])
inter_pos = inter_pos[m1 & m2 & m3 & m4]
# set colors for markers
colors = np.ones((inter_pos.shape[0], 4))
colors[:] = (0.5, 0.5, 0, 1)
# value of intersected points
inter_value = []
for each_point in inter_pos:
x = int((each_point[0] - trans[0]) / scale[0])
y = int((each_point[1] - trans[1]) / scale[1])
z = int((each_point[2] - trans[2]) / scale[2])
inter_value.append(self.current_visible_array[(z, y, x)])
inter_value = np.array(inter_value)
assert inter_value.shape[0] == inter_pos.shape[0]
# TODO: there is a risk that no intersected points found here
if len(inter_pos) == 0 or len(inter_value) == 0:
return None, None
else:
return [inter_pos[np.argmax(inter_value)]], inter_value.max()
def get_ray_line(self):
"""
Get the ray line from camera pos to the far point.
:return: Start point and end point position.
"""
tr_back = self.visual.get_transform(map_from='canvas', map_to='visual')
tr_visual = self._vispy_widget.limit_transforms[self.visual]
_cam = self._vispy_widget.view.camera
start_point = _cam.transform.map(_cam.center)
# If the camera is in the fov=0 mode, the start point is incorrect
# and is still at the 'near' distance. We therefore move the camera
# towards 'infinity' to get the right ray direction.
if _cam.fov == 0:
start_point[:3] *= 1e30
end_point = np.append(self.selection_origin, 1e-5).astype(float)
end_point = tr_back.map(end_point)
# add the self.visual local transform
end_point = tr_visual.map(end_point)
end_point = end_point[:3] / end_point[3]
return np.array([end_point, start_point[:3]])
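# --- Editor's note (worked example for get_inter_value) ---
# The parametric line form used above: for a line through P0 and P1, each
# z-slice gives t = (z - z0)/(z1 - z0), then x = x0 + t*(x1 - x0) and
# y = y0 + t*(y1 - y0). With made-up points:
def _ray_slice_intersection_example():
    p0, p1 = np.array([0.0, 0.0, 0.0]), np.array([2.0, 4.0, 8.0])
    z = 2.0
    t = (z - p0[2]) / (p1[2] - p0[2])  # 0.25
    return p0 + t * (p1 - p0)          # -> [0.5, 1.0, 2.0]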
| {
"repo_name": "PennyQ/glue-3d-viewer",
"path": "glue_vispy_viewers/volume/volume_toolbar.py",
"copies": "1",
"size": "7866",
"license": "bsd-2-clause",
"hash": 2754376998310729700,
"line_mean": 35.4166666667,
"line_max": 94,
"alpha_frac": 0.5729722858,
"autogenerated": false,
"ratio": 3.585232452142206,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4658204737942206,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
OPTIONS = {
'display_width': 80,
'arithmetic_join': 'inner',
'enable_cftimeindex': False
}
class set_options(object):
"""Set options for xarray in a controlled context.
Currently supported options:
- ``display_width``: maximum display width for ``repr`` on xarray objects.
Default: ``80``.
- ``arithmetic_join``: DataArray/Dataset alignment in binary operations.
Default: ``'inner'``.
- ``enable_cftimeindex``: flag to enable using a ``CFTimeIndex``
for time indexes with non-standard calendars or dates outside the
Timestamp-valid range. Default: ``False``.
You can use ``set_options`` either as a context manager:
>>> ds = xr.Dataset({'x': np.arange(1000)})
>>> with xr.set_options(display_width=40):
... print(ds)
<xarray.Dataset>
Dimensions: (x: 1000)
Coordinates:
* x (x) int64 0 1 2 3 4 5 6 ...
Data variables:
*empty*
Or to set global options:
>>> xr.set_options(display_width=80)
"""
def __init__(self, **kwargs):
invalid_options = {k for k in kwargs if k not in OPTIONS}
if invalid_options:
raise ValueError('argument names %r are not in the set of valid '
'options %r' % (invalid_options, set(OPTIONS)))
self.old = OPTIONS.copy()
OPTIONS.update(kwargs)
def __enter__(self):
return
def __exit__(self, type, value, traceback):
OPTIONS.clear()
OPTIONS.update(self.old)
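# --- Editor's sketch ---
# The context-manager semantics above in action: values set in ``__init__``
# apply immediately, and ``__exit__`` restores the previous snapshot.
def _set_options_example():
    assert OPTIONS['display_width'] == 80
    with set_options(display_width=40):
        assert OPTIONS['display_width'] == 40
    assert OPTIONS['display_width'] == 80  # restored on exit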
| {
"repo_name": "jcmgray/xarray",
"path": "xarray/core/options.py",
"copies": "1",
"size": "1588",
"license": "apache-2.0",
"hash": -662777179104357000,
"line_mean": 28.9622641509,
"line_max": 78,
"alpha_frac": 0.6007556675,
"autogenerated": false,
"ratio": 3.882640586797066,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49833962542970656,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
from __future__ import with_statement
import pickle
import os
import sys
from io import UnsupportedOperation
import _pytest._code
import py
import pytest
import contextlib
from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.main import EXIT_NOTESTSCOLLECTED
needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')")
if sys.version_info >= (3, 0):
def tobytes(obj):
if isinstance(obj, str):
obj = obj.encode('UTF-8')
assert isinstance(obj, bytes)
return obj
def totext(obj):
if isinstance(obj, bytes):
obj = str(obj, 'UTF-8')
assert isinstance(obj, str)
return obj
else:
def tobytes(obj):
if isinstance(obj, unicode):
obj = obj.encode('UTF-8')
assert isinstance(obj, str)
return obj
def totext(obj):
if isinstance(obj, str):
obj = unicode(obj, 'UTF-8')
assert isinstance(obj, unicode)
return obj
def oswritebytes(fd, obj):
os.write(fd, tobytes(obj))
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
class TestCaptureManager(object):
def test_getmethod_default_no_fd(self, monkeypatch):
from _pytest.capture import pytest_addoption
from _pytest.config import Parser
parser = Parser()
pytest_addoption(parser)
default = parser._groups[0].options[0].default
assert default == "fd" if hasattr(os, "dup") else "sys"
parser = Parser()
monkeypatch.delattr(os, 'dup', raising=False)
pytest_addoption(parser)
assert parser._groups[0].options[0].default == "sys"
@needsosdup
@pytest.mark.parametrize("method",
['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.init_capturings()
outerr = capman.suspendcapture()
assert outerr == ("", "")
outerr = capman.suspendcapture()
assert outerr == ("", "")
print("hello")
out, err = capman.suspendcapture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resumecapture()
print("hello")
out, err = capman.suspendcapture()
if method != "no":
assert out == "hello\n"
capman.reset_capturings()
finally:
capouter.stop_capturing()
@needsosdup
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.init_capturings()
pytest.raises(AssertionError, "capman.init_capturings()")
capman.reset_capturings()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_unicode(testdir, method):
if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2):
pytest.xfail("does not work on pypy < 2.2")
if sys.version_info >= (3, 0):
obj = "'b\u00f6y'"
else:
obj = "u'\u00f6y'"
testdir.makepyfile("""
# coding=utf8
# taken from issue 227 from nosetests
def test_unicode():
import sys
print (sys.stdout)
print (%s)
""" % obj)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
@pytest.mark.parametrize("method", ['fd', 'sys'])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile("""
def test_unicode():
print ('b\\u00f6y')
""")
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines([
"*1 passed*"
])
def test_collect_capturing(testdir):
p = testdir.makepyfile("""
print ("collect %s failure" % 13)
import xyz42123
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*Captured stdout*",
"*collect 13 failure*",
])
class TestPerTestCapturing(object):
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile("""
def setup_module(mod):
print ("setup module")
def setup_function(function):
print ("setup " + function.__name__)
def test_func1():
print ("in func1")
assert 0
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
])
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile("""
import sys
def setup_module(func):
print ("module-setup")
def setup_function(func):
print ("function-setup")
def test_func():
print ("in function")
assert 0
def teardown_function(func):
print ("in teardown")
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
])
def test_no_carry_over(self, testdir):
p = testdir.makepyfile("""
def test_func1():
print ("in func1")
def test_func2():
print ("in func2")
assert 0
""")
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile("""
def setup_function(function):
print ("setup func1")
def teardown_function(function):
print ("teardown func1")
assert 0
def test_func1():
print ("in func1")
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
'*teardown_function*',
'*Captured stdout*',
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
])
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile("""
def teardown_module(mod):
print ("teardown module")
assert 0
def test_func():
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
])
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile("""
import sys
def test_capturing():
print (42)
sys.stderr.write(str(23))
def test_capturing_error():
print (1)
sys.stderr.write(str(2))
raise ValueError
""")
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines([
"*test_capturing_outerr.py .F",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
])
class TestLoggingInteraction(object):
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile("""
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
""")
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_function(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_function(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors show first!
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile("""
import logging
def setup_module(function):
logging.warn("hello1")
def test_logging():
logging.warn("hello2")
assert 0
def teardown_module(function):
logging.warn("hello3")
assert 0
""")
for optargs in (('--capture=sys',), ('--capture=fd',)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines([
"*WARN*hello3", # errors come first
"*WARN*hello1",
"*WARN*hello2",
])
# verify proper termination
assert "closed" not in s
def test_logging_initialized_in_test(self, testdir):
p = testdir.makepyfile("""
import sys
def test_something():
# pytest does not import logging
assert 'logging' not in sys.modules
import logging
logging.basicConfig()
logging.warn("hello432")
assert 0
""")
result = testdir.runpytest_subprocess(
p, "--traceconfig",
"-p", "no:capturelog", "-p", "no:hypothesis", "-p", "no:hypothesispytest")
assert result.ret != 0
result.stdout.fnmatch_lines([
"*hello432*",
])
assert 'operation on closed file' not in result.stderr.str()
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
logging.warn("hello435")
""")
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == EXIT_NOTESTSCOLLECTED
result.stderr.fnmatch_lines([
"WARNING*hello435*",
])
assert 'operation on closed file' not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest("""
import logging
logging.basicConfig()
""")
# make sure that logging is still captured in tests
p = testdir.makepyfile("""
def test_hello():
import logging
logging.warn("hello433")
assert 0
""")
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines([
"WARNING*hello433*",
])
assert 'something' not in result.stderr.str()
assert 'operation on closed file' not in result.stderr.str()
class TestCaptureFixture(object):
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource("""
def test_hello(capsys):
print (42)
out, err = capsys.readouterr()
assert out.startswith("42")
""", *opt)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile("""
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR*setup*test_one*",
"E*capsys*capfd*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 error*"])
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile("""
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*test_one*",
"*capsys*capfd*same*time*",
"*test_two*",
"*capsys*capfd*same*time*",
"*2 failed in*",
])
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile("""
def test_hello(cap%s):
print ("xxx42xxx")
assert 0
""" % method)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"xxx42xxx",
])
@needsosdup
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capfd):
import os
os.write(1, "42".encode('ascii'))
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
""")
reprec.assertoutcome(passed=1)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile("""
def test_hello(capsys, missingarg):
pass
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*test_partial_setup_failure*",
"*1 error*",
])
@needsosdup
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile("""
def test_hello(capfd):
import os
os.write(1, str(42).encode('ascii'))
raise KeyboardInterrupt()
""")
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines([
"*KeyboardInterrupt*"
])
assert result.ret == 2
@pytest.mark.issue14
def test_capture_and_logging(self, testdir):
p = testdir.makepyfile("""
import logging
def test_log(capsys):
logging.error('x')
""")
result = testdir.runpytest_subprocess(p)
assert 'closed' not in result.stderr.str()
@pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
def test_disabled_capture_fixture(self, testdir, fixture):
testdir.makepyfile("""
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
""".format(fixture=fixture))
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*while capture is disabled*
""")
assert 'captured before' not in result.stdout.str()
assert 'captured after' not in result.stdout.str()
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(_pytest._code.Source("""
def pytest_runtest_setup(item):
raise ValueError(42)
"""))
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
result.stdout.fnmatch_lines([
"*ValueError(42)*",
"*1 error*"
])
def test_fdfuncarg_skips_on_no_osdup(testdir):
testdir.makepyfile("""
import os
if hasattr(os, 'dup'):
del os.dup
def test_hello(capfd):
pass
""")
result = testdir.runpytest_subprocess("--capture=no")
result.stdout.fnmatch_lines([
"*1 skipped*"
])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
assert 'hello19' not in result.stdout.str()
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile("""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
""")
result = testdir.runpytest('--cap=fd')
result.stdout.fnmatch_lines('''
*def test_func*
*assert 0*
*Captured*
*1 failed*
''')
def test_capture_early_option_parsing(testdir):
testdir.makeconftest("""
def pytest_runtest_setup():
print ("hello19")
""")
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert 'hello19' in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
""")
result = testdir.runpytest('--assert=plain')
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
""")
result = testdir.runpytest_subprocess(
"-p", "pytest_xyz", "--version", syspathinsert=True
)
result.stderr.fnmatch_lines([
"*in bad_snap",
" raise Exception('boom')",
"Exception: boom",
])
class TestCaptureIO(object):
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
if sys.version_info >= (3, 0):
f.write("\u00f6")
pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
else:
f.write(unicode("\u00f6", 'UTF-8'))
f.write("hello") # bytes
s = f.getvalue()
f.close()
assert isinstance(s, unicode)
@pytest.mark.skipif(
sys.version_info[0] == 2,
reason='python 3 only behaviour',
)
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b'foo\r\n')
assert f.getvalue() == 'foo\r\n'
def test_bytes_io():
f = py.io.BytesIO()
f.write(tobytes("hello"))
pytest.raises(TypeError, "f.write(totext('hello'))")
s = f.getvalue()
assert s == tobytes("hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
pytest.raises(IOError, iter, f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
@pytest.mark.skipif('sys.version_info < (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python3():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
fb = f.buffer
assert not fb.isatty()
pytest.raises(IOError, fb.read)
pytest.raises(IOError, fb.readlines)
pytest.raises(IOError, iter, fb)
pytest.raises(ValueError, fb.fileno)
f.close() # just for completeness
@pytest.mark.skipif('sys.version_info >= (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python2():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
with pytest.raises(AttributeError):
f.buffer
f.close() # just for completeness
@pytest.yield_fixture
def tmpfile(testdir):
f = testdir.makepyfile("").open('wb+')
yield f
if not f.closed:
f.close()
@needsosdup
def test_dupfile(tmpfile):
flist = []
for i in range(5):
nf = capture.safe_text_dupfile(tmpfile, "wb")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print(i, end="", file=nf)
flist.append(nf)
fname_open = flist[0].name
assert fname_open == repr(flist[0].buffer)
for i in range(5):
f = flist[i]
f.close()
fname_closed = flist[0].name
assert fname_closed == repr(flist[0].buffer)
assert fname_closed != fname_open
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
assert fname_closed == repr(flist[0].buffer)
def test_dupfile_on_bytesio():
io = py.io.BytesIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == b"hello"
assert 'BytesIO object' in f.name
def test_dupfile_on_textio():
io = py.io.TextIO()
f = capture.safe_text_dupfile(io, "wb")
f.write("hello")
assert io.getvalue() == "hello"
assert not hasattr(f, 'name')
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = py.process.cmdexec("lsof -p %d" % pid)
except (py.process.cmdexec.Error, UnicodeDecodeError):
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof'")
yield
out2 = py.process.cmdexec("lsof -p %d" % pid)
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture(object):
pytestmark = needsosdup
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = tobytes("hello")
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open('wb+') as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self, tmpfile):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == tobytes('')
def test_writeorg(self, tmpfile):
data1, data2 = tobytes("foo"), tobytes("bar")
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2)
scap = cap.snap()
cap.done()
assert scap == totext(data1)
with open(tmpfile.name, 'rb') as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self, tmpfile):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = tobytes("hello")
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, tobytes("world"))
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, tobytes("but now"))
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture(object):
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hx\xc4\x85\xc4\x87")
out, err = cap.readouterr()
assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
@pytest.mark.skipif('sys.version_info >= (3,)',
reason='text output different for bytes on python3')
def test_capturing_readouterr_decode_error_handling(self):
with self.getcapture() as cap:
            # triggered an internal error in pytest
print('\xa6')
out, err = cap.readouterr()
assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(IOError, "sys.stdin.read()")
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsosdup
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile("""
import os
def test_x():
os.write(1, "hello\\n".encode("ascii"))
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_x*
*assert 0*
*Captured stdout*
""")
def test_intermingling(self):
with self.getcapture() as cap:
oswritebytes(1, "1")
sys.stdout.write(str(2))
sys.stdout.flush()
oswritebytes(1, "3")
oswritebytes(2, "a")
sys.stderr.write("b")
sys.stderr.flush()
oswritebytes(2, "c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD(object):
pytestmark = needsosdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile("""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_,
Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
cap.stop_capturing()
""")
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()['passed'] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = 'test text'
print(test_text.encode(sys.stdout.encoding, 'replace'))
(out, err) = capsys.readouterr()
assert out
assert err == ''
@needsosdup
@pytest.mark.parametrize('use', [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@needsosdup
def test_close_and_capture_again(testdir):
testdir.makepyfile("""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
""")
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*test_capture_again*
*assert 0*
*stdout*
*hello*
""")
@pytest.mark.parametrize('method', ['SysCapture', 'FDCapture'])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, 'dup'):
pytest.skip("need os.dup")
# here we check a fundamental feature
p = testdir.makepyfile("""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warn("hello1")
outerr = cap.readouterr()
print ("suspend, captured %%s" %%(outerr,))
logging.warn("hello2")
cap.pop_outerr_to_orig()
logging.warn("hello3")
outerr = cap.readouterr()
print ("suspend2, captured %%s" %% (outerr,))
""" % (method,))
result = testdir.runpython(p)
result.stdout.fnmatch_lines("""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
""")
result.stderr.fnmatch_lines("""
WARNING:root:hello2
""")
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
assert sys.stdout.errors == "strict"
assert sys.stderr.errors == "strict"
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(not sys.platform.startswith('win') and sys.version_info[:2] >= (3, 6),
reason='only py3.6+ on windows')
def test_py36_windowsconsoleio_workaround_non_standard_streams():
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream:
def write(self, s):
pass
stream = DummyStream()
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile("""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
""")
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_pickling_and_unpickling_encoded_file():
# See https://bitbucket.org/pytest-dev/pytest/pull-request/194
# pickle.loads() raises infinite recursion if
# EncodedFile.__getattr__ is not implemented properly
ef = capture.EncodedFile(None, None)
ef_as_str = pickle.dumps(ef)
pickle.loads(ef_as_str)
| {
"repo_name": "MichaelAquilina/pytest",
"path": "testing/test_capture.py",
"copies": "1",
"size": "36309",
"license": "mit",
"hash": 204528762613949060,
"line_mean": 29.8225806452,
"line_max": 97,
"alpha_frac": 0.5506899116,
"autogenerated": false,
"ratio": 3.9821232726475104,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.503281318424751,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
# NOTE: unicode_literals removed from __future__ due to issues with cherrypy
import logging
import time
import sys
import cherrypy
import pprint
import json
import boundary_plugin
import boundary_accumulator
class BrixbitsApp(object):
def __init__(self, data_callback, port=12001, username='brixbits', password='brixbits', debug=False):
self.data_callback = data_callback
self.debug = debug
self.port = port
self.conf = {
'/': {
'tools.auth_basic.on': True,
                'tools.auth_basic.realm': 'Brixbits Agent',
                'tools.auth_basic.checkpassword': lambda realm, u, p: u == username and p == password,
}
}
def start(self):
cherrypy.config.update({'server.socket_port': self.port, 'log.screen': False})
if not self.debug:
cherrypy.log.error_log.propagate = False
cherrypy.log.access_log.propagate = False
cherrypy.quickstart(self, '/', self.conf)
@cherrypy.expose
@cherrypy.tools.allow(methods=('POST',))
def put(self):
        length = cherrypy.request.headers['Content-Length']
        rawbody = cherrypy.request.body.read(int(length))
data = json.loads(rawbody)
if self.debug:
pprint.pprint(data)
self.data_callback(data)
return 'OK'
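    # Illustrative sketch (hypothetical payload, not part of the plugin):
    # the agent is expected to POST JSON to this endpoint with HTTP basic
    # auth, e.g.
    #
    #   curl -u brixbits:brixbits -X POST http://localhost:12001/put \
    #        -H 'Content-Type: application/json' \
    #        -d '{"msgType": "2", "Host": "h1", "AppInstance": "i1", "data": [{}]}'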
class BrixbitsPlugin(object):
MESSAGE_TYPE_APP_SERVER_METRICS = 2
MESSAGE_TYPE_TRANSACTION_METRICS = 3
MESSAGE_TYPE_EXIT_POINT_METRICS = 4
def __init__(self, boundary_metric_prefix):
self.boundary_metric_prefix = boundary_metric_prefix
self.settings = boundary_plugin.parse_params()
self.accumulator = boundary_accumulator
self.listener_app = None
@staticmethod
def get_app_server_metric_list():
return (
('BRIXBITS_POC_PERCENT_HEAP_MEMORY', 'CurrentPctOfHeapMemoryInUse', False, 0.01),
('BRIXBITS_POC_ERRORS', 'DeltaErrors', True),
('BRIXBITS_POC_EXCEPTIONS', 'DeltaExceptions', True),
('BRIXBITS_POC_GC_COUNT', 'DeltaGarbageCollectionCount', False),
('BRIXBITS_POC_GC_PERCENT_CPU', 'DeltaGarbageCollectionPctCPU', False, 0.01),
('BRIXBITS_POC_GC_TIME', 'DeltaGarbageCollectionTime', False),
('BRIXBITS_POC_JVM_CPU_INSTANCES_EXCEEDED', 'DeltaJVMCPUInstancesExceeded', False),
('BRIXBITS_POC_JVM_CPU_INSTANCES_EXCEEDED_PERCENT', 'DeltaJVMCPUInstancesExceededPct', False),
('BRIXBITS_POC_LIVE_SESSIONS', 'DeltaLiveSessions', False),
('BRIXBITS_POC_NEW_SESSIONS', 'DeltaNewSessions', False),
('BRIXBITS_POC_TRANSACTIONS', 'DeltaTransactions', False),
('BRIXBITS_POC_EXCEEDED_INSTANCE_LATENCY', 'ExceededInstanceLatency', True),
('BRIXBITS_POC_EXCEEDED_INTERVAL_LATENCY', 'ExceededIntervalLatency', True),
('BRIXBITS_POC_AVG_JVM_CPU_USED', 'IntervalAvgJVMCPUUsed', False),
)
@staticmethod
def get_transaction_metric_list():
return (
('BRIXBITS_POC_ERRORS', 'DeltaErrors', True),
('BRIXBITS_POC_PERCENT_ERRORS', 'DeltaErrorsPct', False, 0.01),
('BRIXBITS_POC_EXCEPTIONS', 'DeltaExceptions', True),
('BRIXBITS_POC_PERCENT_EXCEPTIONS', 'DeltaExceptionsPct', False, 0.01),
('BRIXBITS_POC_TRANSACTIONS', 'DeltaTransactions', False),
('BRIXBITS_POC_EXCEEDED_INSTANCE_LATENCY', 'ExceededInstanceLatencyInterval', False),
('BRIXBITS_POC_EXCEEDED_INTERVAL_LATENCY', 'ExceededIntervalLatency', True),
('BRIXBITS_POC_LATENCY', 'IntervalLatency', False)
)
@staticmethod
def get_exit_point_metric_list():
return (
('BRIXBITS_POC_EXIT_AVERAGE_CONNECT_LATENCY', 'DeltaAvgConnectExitLatency', False),
('BRIXBITS_POC_EXIT_AVERAGE_READ_LATENCY', 'DeltaAvgReadExitLatency', False),
('BRIXBITS_POC_EXIT_AVERAGE_WRITE_LATENCY', 'DeltaAvgWriteExitLatency', False),
('BRIXBITS_POC_EXIT_CONNECT_ERRORS', 'DeltaConnectErrors', False),
('BRIXBITS_POC_EXIT_CONNECTS', 'DeltaConnectExits', False),
('BRIXBITS_POC_EXIT_ERRORS', 'DeltaExitErrors', False),
('BRIXBITS_POC_EXIT_LATENCY', 'DeltaExitLatency', False),
('BRIXBITS_POC_EXIT_EXITS', 'DeltaExits', False),
('BRIXBITS_POC_EXIT_READ_ERRORS', 'DeltaReadErrors', False),
('BRIXBITS_POC_EXIT_READS', 'DeltaReadExits', False),
('BRIXBITS_POC_EXIT_WRITE_ERRORS', 'DeltaWriteErrors', False),
('BRIXBITS_POC_EXIT_WRITES', 'DeltaWriteExits', False),
)
def handle_metric_list(self, metric_list, data, source):
for metric_item in metric_list:
boundary_name, metric_name, accumulate = metric_item[:3]
scale = metric_item[3] if len(metric_item) >= 4 else None
metric_data = data.get(metric_name, None)
if not metric_data:
# If certain metrics do not exist or have no value
# (e.g. disabled in the server or just inactive) - skip them.
continue
if scale:
metric_data = float(metric_data) * scale
if accumulate:
value = self.accumulator.accumulate(source + '_' + metric_name, float(metric_data))
else:
value = metric_data
boundary_plugin.boundary_report_metric(self.boundary_metric_prefix + boundary_name, value, source)
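    # Sketch of the metric tuple format consumed above (values are
    # hypothetical): a 3-tuple routes the value through the accumulator
    # when its `accumulate` flag is true, and an optional 4th element
    # scales the value first, e.g.
    #
    #   data = {'CurrentPctOfHeapMemoryInUse': '42'}
    #   self.handle_metric_list(
    #       (('BRIXBITS_POC_PERCENT_HEAP_MEMORY',
    #         'CurrentPctOfHeapMemoryInUse', False, 0.01),),
    #       data, 'host1_instance1')
    #   # reports 42 * 0.01 = 0.42 against source 'host1_instance1'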
def handle_metrics(self, data):
if int(data['msgType']) == self.MESSAGE_TYPE_APP_SERVER_METRICS:
source = '%s_%s' % (data['Host'], data['AppInstance'])
self.handle_metric_list(self.get_app_server_metric_list(), data['data'][0], source)
elif int(data['msgType']) == self.MESSAGE_TYPE_TRANSACTION_METRICS:
metric_list = self.get_transaction_metric_list()
for trx in data['data']:
source = '%s_%s_%s' % (data['Host'], data['AppInstance'], trx['TransactionName'])
self.handle_metric_list(metric_list, trx, source)
elif int(data['msgType']) == self.MESSAGE_TYPE_EXIT_POINT_METRICS:
metric_list = self.get_exit_point_metric_list()
for exitpoint in data['data']:
source = '%s_%s_%s:%s' % (data['Host'], data['AppInstance'],
exitpoint['ExitHostName'], exitpoint['ExitHostPort'])
self.handle_metric_list(metric_list, exitpoint, source)
def main(self):
logging.basicConfig(level=logging.ERROR, filename=self.settings.get('log_file', None))
reports_log = self.settings.get('report_log_file', None)
if reports_log:
boundary_plugin.log_metrics_to_file(reports_log)
boundary_plugin.start_keepalive_subprocess()
self.listener_app = BrixbitsApp(self.handle_metrics, int(self.settings.get('port', 12001)),
self.settings.get('username', 'brixbits'), self.settings.get('password', 'brixbits'),
self.settings.get('debug', False))
self.listener_app.start()
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == '-v':
logging.basicConfig(level=logging.INFO)
plugin = BrixbitsPlugin('')
plugin.main()
| {
"repo_name": "BoundaryDev/boundary-plugin-brixbits-poc",
"path": "plugin.py",
"copies": "1",
"size": "7393",
"license": "apache-2.0",
"hash": 863301044580857000,
"line_mean": 45.7911392405,
"line_max": 110,
"alpha_frac": 0.6163938861,
"autogenerated": false,
"ratio": 3.6293568973981345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47457507834981344,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
from io import StringIO
import pandas as pd
# pylint: disable=import-error
from .base import ServerBase, DEFAULT_SCHEMA
from .dataset import Dataset
# pylint: enable=import-error
class Mart(ServerBase):
"""Class representing a biomart mart.
Used to represent specific mart instances on the server. Provides
functionality for listing and loading the datasets that are available
in the corresponding mart.
Args:
name (str): Name of the mart.
database_name (str): ID of the mart on the host.
display_name (str): Display name of the mart.
host (str): Url of host to connect to.
path (str): Path on the host to access to the biomart service.
port (int): Port to use for the connection.
use_cache (bool): Whether to cache requests.
virtual_schema (str): The virtual schema of the dataset.
Examples:
Listing datasets:
>>> server = Server(host='http://www.ensembl.org')
>>> mart = server.['ENSEMBL_MART_ENSEMBL']
>>> mart.list_datasets()
Selecting a dataset:
>>> dataset = mart['hsapiens_gene_ensembl']
"""
RESULT_COLNAMES = ['type', 'name', 'display_name', 'unknown', 'unknown2',
'unknown3', 'unknown4', 'virtual_schema', 'unknown5']
def __init__(self, name, database_name, display_name,
host=None, path=None, port=None, use_cache=True,
virtual_schema=DEFAULT_SCHEMA, extra_params=None):
super().__init__(host=host, path=path,
port=port, use_cache=use_cache)
self._name = name
self._database_name = database_name
self._display_name = display_name
self._virtual_schema = virtual_schema
self._extra_params = extra_params
self._datasets = None
def __getitem__(self, name):
return self.datasets[name]
@property
def name(self):
"""Name of the mart (used as id)."""
return self._name
@property
def display_name(self):
"""Display name of the mart."""
return self._display_name
@property
def database_name(self):
"""Database name of the mart on the host."""
return self._database_name
@property
def datasets(self):
"""List of datasets in this mart."""
if self._datasets is None:
self._datasets = self._fetch_datasets()
return self._datasets
def list_datasets(self):
"""Lists available datasets in a readable DataFrame format.
Returns:
pd.DataFrame: Frame listing available datasets.
"""
def _row_gen(attributes):
for attr in attributes.values():
yield (attr.name, attr.display_name)
return pd.DataFrame.from_records(
_row_gen(self.datasets),
columns=['name', 'display_name'])
def _fetch_datasets(self):
# Get datasets using biomart.
response = self.get(type='datasets', mart=self._name)
# Read result frame from response.
result = pd.read_csv(StringIO(response.text), sep='\t',
header=None, names=self.RESULT_COLNAMES)
# Convert result to a dict of datasets.
datasets = (self._dataset_from_row(row)
for _, row in result.iterrows())
return {d.name: d for d in datasets}
def _dataset_from_row(self, row):
return Dataset(name=row['name'], display_name=row['display_name'],
host=self.host, path=self.path,
port=self.port, use_cache=self.use_cache,
virtual_schema=row['virtual_schema'])
def __repr__(self):
return (('<biomart.Mart name={!r}, display_name={!r},'
' database_name={!r}>')
.format(self._name, self._display_name,
self._database_name))
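# A minimal usage sketch (the Ensembl host and network access are
# assumptions, not part of this module):
#
#   from pybiomart import Server
#   server = Server(host='http://www.ensembl.org')
#   mart = server['ENSEMBL_MART_ENSEMBL']
#   print(mart.list_datasets().head())
#   dataset = mart['hsapiens_gene_ensembl']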
| {
"repo_name": "jrderuiter/pybiomart",
"path": "src/pybiomart/mart.py",
"copies": "1",
"size": "4194",
"license": "mit",
"hash": 8835436125980904000,
"line_mean": 31.765625,
"line_max": 77,
"alpha_frac": 0.5939437291,
"autogenerated": false,
"ratio": 4.127952755905512,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 128
} |
from __future__ import absolute_import, division, print_function
"""
The LinkManager class is responsible for maintaining the consistency
of the "web of links" in a DataCollection. It discovers how to
combine ComponentLinks together to discover all of the ComponentIDs
that a Data object can derive.
As a trivial example, imagine a chain of 2 ComponentLinks linking
ComponentIDs across 3 datasets:
Data: D1 D2 D3
ComponentID: x y z
Link: <---x2y---><--y2z-->
The LinkManager autocreates a link from D1.id['x'] to D3.id['z']
by chaining x2y and y2z.
"""
import logging
from .data import DerivedComponent, Data, ComponentID
from .component_link import ComponentLink
from .link_helpers import LinkCollection
from ..external import six
from .contracts import contract
def accessible_links(cids, links):
""" Calculate all ComponentLink objects in a list
that can be calculated from a collection of componentIds
:param cids: Collection of ComponentID objects
:param links: Iterable of ComponentLink objects
:rtype: list
A list of all links that can be evaluated
given the input ComponentIDs
"""
cids = set(cids)
return [l for l in links if
set(l.get_from_ids()) <= cids]
def discover_links(data, links):
""" Discover all links to components that can be derived
based on the current components known to a dataset, and a set
of ComponentLinks.
    :param data: Data object to discover new components for
:param links: Set of ComponentLinks to use
:rtype: dict
A dict of componentID -> componentLink
The ComponentLink that data can use to generate the componentID.
"""
# TODO: try to add shortest paths first -- should
# prevent lots of repeated checking
cids = set(data.primary_components)
cid_links = {}
depth = {}
for cid in cids:
depth[cid] = 0
while True:
for link in accessible_links(cids, links):
from_ = set(link.get_from_ids())
to_ = link.get_to_id()
cost = max([depth[f] for f in from_]) + 1
if to_ in cids and cost >= depth[to_]:
continue
depth[to_] = cost
cids.add(to_)
cid_links[to_] = link
break
else:
# no more links to add
break
return cid_links
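# Sketch of the search above (hypothetical links): starting from primary
# components {x} with links x -> y and y -> z, the loop first records y
# at depth 1, then z at depth 2; rediscovering y through a longer chain
# is skipped because its recorded depth is already <= the new cost.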
def find_dependents(data, link):
""" Determine which `DerivedComponents` in a data set
depend (either directly or implicitly) on a given
`ComponentLink`.
:param data: The data object to consider
:param link: The `ComponentLink` object to consider
:rtype: set
A `set` of `DerivedComponent` IDs that cannot be
calculated without the input `Link`
"""
dependents = set()
visited = set()
while True:
for derived in data.derived_components:
derived = data.get_component(derived)
if derived in visited:
continue
to_, from_ = derived.link.get_to_id(), derived.link.get_from_ids()
if derived.link is link:
dependents.add(to_)
visited.add(derived)
break
if any(f in dependents for f in from_):
dependents.add(to_)
visited.add(derived)
break
else:
break # nothing more to remove
return dependents
class LinkManager(object):
"""A helper class to generate and store ComponentLinks,
    and compute which components are accessible from which data sets
"""
def __init__(self):
self._links = set()
self._duplicated_ids = []
def add_link(self, link):
"""
Ingest one or more ComponentLinks to the manager
Parameters
----------
link : ComponentLink, LinkCollection, or list thereof
The link(s) to ingest
"""
if isinstance(link, (LinkCollection, list)):
for l in link:
self.add_link(l)
else:
self._links.add(link)
if link.identity:
self._add_duplicated_id(link)
self._reassign_mergers()
def _add_duplicated_id(self, link):
frm = link.get_from_ids()
assert len(frm) == 1
frm = frm[0]
to = link.get_to_id()
if (frm, to) in self._duplicated_ids:
return
if (to, frm) in self._duplicated_ids:
return
self._duplicated_ids.append((frm, to))
def _reassign_mergers(self):
"""Update all links such that any reference to a duplicate
componentID is replaced with the original"""
for l in self._links:
for o, d in self._duplicated_ids:
l.replace_ids(d, o)
def _merge_duplicate_ids(self, data):
for o, d in self._duplicated_ids:
if d in data.components:
data.update_id(d, o)
@contract(link=ComponentLink)
def remove_link(self, link):
logging.getLogger(__name__).debug('removing link %s', link)
self._links.remove(link)
@contract(data=Data)
def update_data_components(self, data):
"""Update all the DerivedComponents in a data object, based on
all the Components deriveable based on the links in self.
This overrides any ComponentLinks stored in the
DerivedComponents of the data itself -- any components which
depend on a link not tracked by the LinkManager will be
deleted.
Parameters
-----------
data : Data object
Behavior
--------
DerivedComponents will be replaced / added into
the data object
"""
self._merge_duplicate_ids(data)
self._remove_underiveable_components(data)
self._add_deriveable_components(data)
def _remove_underiveable_components(self, data):
""" Find and remove any DerivedComponent in the data
which requires a ComponentLink not tracked by this LinkManager
"""
data_links = set(data.get_component(dc).link
for dc in data.derived_components)
missing_links = data_links - self._links
to_remove = []
for m in missing_links:
to_remove.extend(find_dependents(data, m))
for r in to_remove:
data.remove_component(r)
def _add_deriveable_components(self, data):
"""Find and add any DerivedComponents that a data object can
calculate given the ComponentLinks tracked by this
LinkManager
"""
links = discover_links(data, self._links)
for cid, link in six.iteritems(links):
d = DerivedComponent(data, link)
data.add_component(d, cid)
@property
def links(self):
return list(self._links)
def clear(self):
self._links.clear()
def __contains__(self, item):
return item in self._links
| {
"repo_name": "JudoWill/glue",
"path": "glue/core/link_manager.py",
"copies": "1",
"size": "6984",
"license": "bsd-3-clause",
"hash": -4244539079751018000,
"line_mean": 29.7665198238,
"line_max": 78,
"alpha_frac": 0.6033791523,
"autogenerated": false,
"ratio": 4.1745367603108186,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00044766682982542007,
"num_lines": 227
} |
from __future__ import absolute_import, division, print_function
THEMES = ["classic", "vim", "dark vim", "midnight", "solarized", "agr-256", "monokai", "monokai-256"]
from pudb.py3compat import execfile, raw_input
import urwid
def get_palette(may_use_fancy_formats, theme="classic"):
if may_use_fancy_formats:
def add_setting(color, setting):
return color+","+setting
else:
def add_setting(color, setting):
return color
#-----------------------------------------------------------------------------------
# Reference for some palette items:
#
# "namespace" : "import", "from", "using"
# "operator" : "+", "-", "=" etc.
# NOTE: Does not include ".", which is assigned the type "source"
# "argument" : Function arguments
# "builtin" : "range", "dict", "set", "list", etc.
# "pseudo" : "None", "True", "False"
# NOTE: Does not include "self", which is assigned the type "source"
# "dunder" : Class method names of the form __<name>__ within a class definition
# "exception" : Exception names
# "keyword" : All keywords except those specifically assigned to "keyword2"
# ("from", "and", "break", "is", "try", "pass", etc.)
# "keyword2" : "class", "def", "exec", "lambda", "print"
#-----------------------------------------------------------------------------------
inheritance_map = (
# Style Inherits from
# ---------- ----------
("namespace", "keyword"),
("operator", "source"),
("argument", "source"),
("builtin", "source"),
("pseudo", "source"),
("dunder", "name"),
("exception", "source"),
("keyword2", "keyword")
)
palette_dict = {
# The following styles are initialized to "None". Themes
# (including custom Themes) may set them as needed.
# If they are not set by a theme, then they will
# inherit from other styles in accordance with
# the inheritance_map.
"namespace": None,
"operator": None,
"argument": None,
"builtin": None,
"pseudo": None,
"dunder": None,
"exception": None,
"keyword2": None,
# {{{ ui
"header": ("black", "light gray", "standout"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"button": (add_setting("white", "bold"), "dark blue"),
"focused button": ("light cyan", "black"),
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray", "underline"),
"focused sidebar": (add_setting("yellow", "bold"), "light gray", "standout"),
"warning": (add_setting("white", "bold"), "dark red", "standout"),
"label": ("black", "light gray"),
"value": (add_setting("yellow", "bold"), "dark blue"),
"fixed value": ("light gray", "dark blue"),
"group head": (add_setting("dark blue", "bold"), "light gray"),
"search box": ("black", "dark cyan"),
"search not found": ("white", "dark red"),
# }}}
# {{{ shell
"command line edit": (add_setting("yellow", "bold"), "dark blue"),
"command line prompt": (add_setting("white", "bold"), "dark blue"),
"command line output": ("light cyan", "dark blue"),
"command line input": (add_setting("light cyan", "bold"), "dark blue"),
"command line error": (add_setting("light red", "bold"), "dark blue"),
"focused command line output": ("black", "dark green"),
"focused command line input": (add_setting("light cyan", "bold"), "dark green"),
"focused command line error": ("black", "dark green"),
"command line clear button": (add_setting("white", "bold"), "dark blue"),
"command line focused button": ("light cyan", "black"),
# }}}
# {{{ source
"breakpoint": ("black", "dark cyan"),
"disabled breakpoint": ("dark gray", "dark cyan"),
"focused breakpoint": ("black", "dark green"),
"focused disabled breakpoint": ("dark gray", "dark green"),
"current breakpoint": (add_setting("white", "bold"), "dark cyan"),
"disabled current breakpoint": (add_setting("dark gray", "bold"), "dark cyan"),
"focused current breakpoint": (add_setting("white", "bold"), "dark green", "bold"),
"focused disabled current breakpoint": (add_setting("dark gray", "bold"), "dark green", "bold"),
"source": (add_setting("yellow", "bold"), "dark blue"),
"focused source": ("black", "dark green"),
"highlighted source": ("black", "dark magenta"),
"current source": ("black", "dark cyan"),
"current focused source": (add_setting("white", "bold"), "dark cyan"),
"current highlighted source": ("white", "dark cyan"),
# {{{ highlighting
"line number": ("light gray", "dark blue"),
"keyword": (add_setting("white", "bold"), "dark blue"),
"name": ("light cyan", "dark blue"),
"literal": ("light magenta, bold", "dark blue"),
"string": (add_setting("light magenta", "bold"), "dark blue"),
"doublestring": (add_setting("light magenta", "bold"), "dark blue"),
"singlestring": (add_setting("light magenta", "bold"), "dark blue"),
"docstring": (add_setting("light magenta", "bold"), "dark blue"),
"punctuation": ("light gray", "dark blue"),
"comment": ("light gray", "dark blue"),
# }}}
# }}}
# {{{ breakpoints
"breakpoint marker": ("dark red", "dark blue"),
"breakpoint source": (add_setting("yellow", "bold"), "dark red"),
"breakpoint focused source": ("black", "dark red"),
"current breakpoint source": ("black", "dark red"),
"current breakpoint focused source": ("white", "dark red"),
# }}}
# {{{ variables view
"variables": ("black", "dark cyan"),
"variable separator": ("dark cyan", "light gray"),
"var label": ("dark blue", "dark cyan"),
"var value": ("black", "dark cyan"),
"focused var label": ("dark blue", "dark green"),
"focused var value": ("black", "dark green"),
"highlighted var label": ("white", "dark cyan"),
"highlighted var value": ("black", "dark cyan"),
"focused highlighted var label": ("white", "dark green"),
"focused highlighted var value": ("black", "dark green"),
"return label": ("white", "dark blue"),
"return value": ("black", "dark cyan"),
"focused return label": ("light gray", "dark blue"),
"focused return value": ("black", "dark green"),
# }}}
# {{{ stack
"stack": ("black", "dark cyan"),
"frame name": ("black", "dark cyan"),
"focused frame name": ("black", "dark green"),
"frame class": ("dark blue", "dark cyan"),
"focused frame class": ("dark blue", "dark green"),
"frame location": ("light cyan", "dark cyan"),
"focused frame location": ("light cyan", "dark green"),
"current frame name": (add_setting("white", "bold"),
"dark cyan"),
"focused current frame name": (add_setting("white", "bold"),
"dark green", "bold"),
"current frame class": ("dark blue", "dark cyan"),
"focused current frame class": ("dark blue", "dark green"),
"current frame location": ("light cyan", "dark cyan"),
"focused current frame location": ("light cyan", "dark green"),
# }}}
}
if theme == "classic":
pass
elif theme == "vim":
# {{{ vim theme
palette_dict.update({
"source": ("black", "default"),
"keyword": ("brown", "default"),
"kw_namespace": ("dark magenta", "default"),
"literal": ("black", "default"),
"string": ("dark red", "default"),
"doublestring": ("dark red", "default"),
"singlestring": ("dark red", "default"),
"docstring": ("dark red", "default"),
"punctuation": ("black", "default"),
"comment": ("dark blue", "default"),
"classname": ("dark cyan", "default"),
"name": ("dark cyan", "default"),
"line number": ("dark gray", "default"),
"breakpoint marker": ("dark red", "default"),
# {{{ shell
"command line edit":
("black", "default"),
"command line prompt":
(add_setting("black", "bold"), "default"),
"command line output":
(add_setting("black", "bold"), "default"),
"command line input":
("black", "default"),
"command line error":
(add_setting("light red", "bold"), "default"),
"focused command line output":
("black", "dark green"),
"focused command line input":
(add_setting("light cyan", "bold"), "dark green"),
"focused command line error":
("black", "dark green"),
# }}}
})
# }}}
elif theme == "dark vim":
# {{{ dark vim
palette_dict.update({
"header": ("black", "light gray", "standout"),
# {{{ variables view
"variables": ("black", "dark gray"),
"variable separator": ("dark cyan", "light gray"),
"var label": ("light gray", "dark gray"),
"var value": ("white", "dark gray"),
"focused var label": ("light gray", "light blue"),
"focused var value": ("white", "light blue"),
"highlighted var label": ("light gray", "dark green"),
"highlighted var value": ("white", "dark green"),
"focused highlighted var label": ("light gray", "light blue"),
"focused highlighted var value": ("white", "light blue"),
"return label": ("light gray", "dark gray"),
"return value": ("light cyan", "dark gray"),
"focused return label": ("yellow", "light blue"),
"focused return value": ("white", "light blue"),
# }}}
# {{{ stack view
"stack": ("black", "dark gray"),
"frame name": ("light gray", "dark gray"),
"focused frame name": ("light gray", "light blue"),
"frame class": ("dark blue", "dark gray"),
"focused frame class": ("dark blue", "light blue"),
"frame location": ("white", "dark gray"),
"focused frame location": ("white", "light blue"),
"current frame name": (add_setting("white", "bold"),
"dark gray"),
"focused current frame name": (add_setting("white", "bold"),
"light blue", "bold"),
"current frame class": ("dark blue", "dark gray"),
"focused current frame class": ("dark blue", "dark green"),
"current frame location": ("light cyan", "dark gray"),
"focused current frame location": ("light cyan", "light blue"),
# }}}
# {{{ breakpoint view
"breakpoint": ("light gray", "dark gray"),
"disabled breakpoint": ("black", "dark gray"),
"focused breakpoint": ("light gray", "light blue"),
"focused disabled breakpoint": ("black", "light blue"),
"current breakpoint": (add_setting("white", "bold"), "dark gray"),
"disabled current breakpoint": ("black", "dark gray"),
"focused current breakpoint":
(add_setting("white", "bold"), "light blue"),
"focused disabled current breakpoint":
("black", "light blue"),
# }}}
# {{{ ui widgets
"selectable": ("light gray", "dark gray"),
"focused selectable": ("white", "light blue"),
"button": ("light gray", "dark gray"),
"focused button": ("white", "light blue"),
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray", "underline"),
"focused sidebar": ("light blue", "light gray", "standout"),
"warning": (add_setting("white", "bold"), "dark red", "standout"),
"label": ("black", "light gray"),
"value": ("white", "dark gray"),
"fixed value": ("light gray", "dark gray"),
"search box": ("white", "dark gray"),
"search not found": ("white", "dark red"),
"dialog title": (add_setting("white", "bold"), "dark gray"),
# }}}
# {{{ source view
"breakpoint marker": ("dark red", "black"),
"breakpoint source": ("light gray", "dark red"),
"breakpoint focused source": ("black", "dark red"),
"current breakpoint source": ("black", "dark red"),
"current breakpoint focused source": ("white", "dark red"),
# }}}
# {{{ highlighting
"source": ("white", "black"),
"focused source": ("white", "light blue"),
"highlighted source": ("black", "dark magenta"),
"current source": ("black", "light gray"),
"current focused source": ("white", "dark cyan"),
"current highlighted source": ("white", "dark cyan"),
"line number": ("dark gray", "black"),
"keyword": ("yellow", "black"),
"literal": ("dark magenta", "black"),
"string": ("dark magenta", "black"),
"doublestring": ("dark magenta", "black"),
"singlestring": ("dark magenta", "black"),
"docstring": ("dark magenta", "black"),
"name": ("light cyan", "black"),
"punctuation": ("yellow", "black"),
"comment": ("light blue", "black"),
# }}}
# {{{ shell
"command line edit":
("white", "black"),
"command line prompt":
(add_setting("yellow", "bold"), "black"),
"command line output":
(add_setting("yellow", "bold"), "black"),
"command line input":
("white", "black"),
"command line error":
(add_setting("light red", "bold"), "black"),
"focused command line output":
("black", "light blue"),
"focused command line input":
(add_setting("light cyan", "bold"), "light blue"),
"focused command line error":
("black", "light blue"),
# }}}
})
# }}}
elif theme == "midnight":
# {{{ midnight
# Based on XCode's midnight theme
# Looks best in a console with green text against black background
palette_dict.update({
"variables": ("white", "default"),
"var label": ("light blue", "default"),
"var value": ("white", "default"),
"stack": ("white", "default"),
"frame name": ("white", "default"),
"frame class": ("dark blue", "default"),
"frame location": ("light cyan", "default"),
"current frame name": (add_setting("white", "bold"), "default"),
"current frame class": ("dark blue", "default"),
"current frame location": ("light cyan", "default"),
"focused frame name": ("black", "dark green"),
"focused frame class": (add_setting("white", "bold"), "dark green"),
"focused frame location": ("dark blue", "dark green"),
"focused current frame name": ("black", "dark green"),
"focused current frame class": (add_setting("white", "bold"), "dark green"),
"focused current frame location": ("dark blue", "dark green"),
"breakpoint": ("default", "default"),
"search box": ("default", "default"),
"breakpoint": ("white", "default"),
"disabled breakpoint": ("dark gray", "default"),
"focused breakpoint": ("black", "dark green"),
"focused disabled breakpoint": ("dark gray", "dark green"),
"current breakpoint": (add_setting("white", "bold"), "default"),
"disabled current breakpoint": (add_setting("dark gray", "bold"), "default"),
"focused current breakpoint": (add_setting("white", "bold"), "dark green", "bold"),
"focused disabled current breakpoint": (add_setting("dark gray", "bold"), "dark green", "bold"),
"source": ("white", "default"),
"highlighted source": ("white", "light cyan"),
"current source": ("white", "light gray"),
"current focused source": ("white", "brown"),
"line number": ("light gray", "default"),
"keyword": ("dark magenta", "default"),
"name": ("white", "default"),
"literal": ("dark cyan", "default"),
"string": ("dark red", "default"),
"doublestring": ("dark red", "default"),
"singlestring": ("light blue", "default"),
"docstring": ("light red", "default"),
"backtick": ("light green", "default"),
"punctuation": ("white", "default"),
"comment": ("dark green", "default"),
"classname": ("dark cyan", "default"),
"funcname": ("white", "default"),
"breakpoint marker": ("dark red", "default"),
# {{{ shell
"command line edit": ("white", "default"),
"command line prompt": (add_setting("white", "bold"), "default"),
"command line output": (add_setting("white", "bold"), "default"),
"command line input": (add_setting("white", "bold"), "default"),
"command line error": (add_setting("light red", "bold"), "default"),
"focused command line output": ("black", "dark green"),
"focused command line input": (add_setting("white", "bold"), "dark green"),
"focused command line error": ("black", "dark green"),
"command line clear button": (add_setting("white", "bold"), "default"),
"command line focused button": ("black", "light gray"), # White
# doesn't work in curses mode
# }}}
})
# }}}
elif theme == "solarized":
# {{{ solarized
palette_dict.update({
# UI
"header": ("black", "light blue", "standout"),
"focused sidebar": ("yellow", "light blue", "standout"),
"group head": ("black", "light blue"),
"background": ("black", "light blue"),
"label": ("black", "light blue"),
"value": ("white", "dark blue"),
"fixed value": ("black", "light blue"),
"variables": ("light blue", "default"),
"var label": ("dark blue", "default"),
"var value": ("light blue", "default"),
"focused var label": ("white", "dark blue"),
"focused var value": ("black", "dark blue"),
"highlighted var label": ("white", "light green"),
"highlighted var value": ("white", "light green"),
"focused highlighted var label": ("white", "light green"),
"focused highlighted var value": ("white", "light green"),
"stack": ("light blue", "default"),
"frame name": ("dark blue", "default"),
"frame class": ("light blue", "default"),
"frame location": ("light green", "default"),
"focused frame name": ("white", "dark blue"),
"focused frame class": ("black", "dark blue"),
"focused frame location": ("dark gray", "dark blue"),
"focused current frame name": ("white", "light green"),
"focused current frame class": ("black", "light green"),
"focused current frame location": ("dark gray", "light green"),
"current frame name": ("white", "light green"),
"current frame class": ("black", "light green"),
"current frame location": ("dark gray", "light green"),
# breakpoints
"breakpoint": ("light blue", "default"),
"disabled breakpoint": ("light gray", "default"),
"focused breakpoint": ("white", "light green"),
"focused disabled breakpoint": ("light gray", "light green"),
"current breakpoint": ("white", "dark blue"),
"disabled current breakpoint": ("light gray", "dark blue"),
"focused current breakpoint": ("white", "light green"),
"focused disabled current breakpoint": ("light gray", "light green"),
# source
"breakpoint source": ("light blue", "black"),
"current breakpoint source": ("black", "light green"),
"breakpoint focused source": ("dark gray", "dark blue"),
"current breakpoint focused source": ("black", "light green"),
"breakpoint marker": ("dark red", "default"),
"search box": ("default", "default"),
"source": ("light blue", "default"),
"current source": ("light gray", "light blue"),
"current focused source": ("light gray", "light blue"),
"focused source": ("dark gray", "dark blue"),
"current highlighted source": ("black", "dark cyan"),
"highlighted source": ("light blue", "black"),
"line number": ("light blue", "default"),
"keyword": ("dark green", "default"),
"name": ("light blue", "default"),
"literal": ("dark cyan", "default"),
"string": ("dark cyan", "default"),
"doublestring": ("dark cyan", "default"),
"singlestring": ("light blue", "default"),
"docstring": ("dark cyan", "default"),
"backtick": ("light green", "default"),
"punctuation": ("light blue", "default"),
"comment": ("light green", "default"),
"classname": ("dark blue", "default"),
"funcname": ("dark blue", "default"),
# shell
"command line edit": ("light blue", "default"),
"command line prompt": ("light blue", "default"),
"command line output": ("light blue", "default"),
"command line input": ("light blue", "default"),
"command line error": ("dark red", "default"),
"focused command line output": ("black", "light green"),
"focused command line input": ("black", "light green"),
"focused command line error": ("dark red", "light blue"),
"command line clear button": ("light blue", "default"),
"command line focused button": ("black", "light blue"),
})
# }}}
elif theme == "agr-256":
# {{{ agr-256
palette_dict.update({
"header": ("h235", "h252", "standout"),
# {{{ variables view
"variables": ("h235", "h233"),
"variable separator": ("h23", "h252"),
"var label": ("h111", "h233"),
"var value": ("h255", "h233"),
"focused var label": ("h192", "h24"),
"focused var value": ("h192", "h24"),
"highlighted var label": ("h252", "h22"),
"highlighted var value": ("h255", "h22"),
"focused highlighted var label": ("h252", "h64"),
"focused highlighted var value": ("h255", "h64"),
"return label": ("h113", "h233"),
"return value": ("h113", "h233"),
"focused return label": (add_setting("h192", "bold"), "h24"),
"focused return value": ("h192", "h24"),
# }}}
# {{{ stack view
"stack": ("h235", "h233"),
"frame name": ("h192", "h233"),
"focused frame name": ("h192", "h24"),
"frame class": ("h111", "h233"),
"focused frame class": ("h192", "h24"),
"frame location": ("h252", "h233"),
"focused frame location": ("h192", "h24"),
"current frame name": ("h255", "h22"),
"focused current frame name": ("h255", "h64"),
"current frame class": ("h111", "h22"),
"focused current frame class": ("h255", "h64"),
"current frame location": ("h252", "h22"),
"focused current frame location": ("h255", "h64"),
# }}}
# {{{ breakpoint view
"breakpoint": ("h80", "h233"),
"disabled breakpoint": ("h60", "h233"),
"focused breakpoint": ("h192", "h24"),
"focused disabled breakpoint": ("h182", "h24"),
"current breakpoint": (add_setting("h255", "bold"), "h22"),
"disabled current breakpoint": (add_setting("h016", "bold"), "h22"),
"focused current breakpoint": (add_setting("h255", "bold"), "h64"),
"focused disabled current breakpoint": (add_setting("h016", "bold"), "h64"),
# }}}
# {{{ ui widgets
"selectable": ("h252", "h235"),
"focused selectable": ("h255", "h24"),
"button": ("h252", "h235"),
"focused button": ("h255", "h24"),
"background": ("h235", "h252"),
"hotkey": (add_setting("h235", "underline"), "h252", "underline"),
"focused sidebar": ("h23", "h252", "standout"),
"warning": (add_setting("h255", "bold"), "h124", "standout"),
"label": ("h235", "h252"),
"value": ("h255", "h17"),
"fixed value": ("h252", "h17"),
"group head": (add_setting("h25", "bold"), "h252"),
"search box": ("h255", "h235"),
"search not found": ("h255", "h124"),
"dialog title": (add_setting("h255", "bold"), "h235"),
# }}}
# {{{ source view
"breakpoint marker": ("h160", "h235"),
"breakpoint source": ("h252", "h124"),
"breakpoint focused source": ("h192", "h124"),
"current breakpoint source": ("h192", "h124"),
"current breakpoint focused source": (add_setting("h192", "bold"), "h124"),
# }}}
# {{{ highlighting
"source": ("h255", "h235"),
"focused source": ("h192", "h24"),
"highlighted source": ("h252", "h22"),
"current source": (add_setting("h252", "bold"), "h23"),
"current focused source": (add_setting("h192", "bold"), "h23"),
"current highlighted source": ("h255", "h22"),
"line number": ("h241", "h235"),
"keyword": ("h111", "h235"),
"literal": ("h173", "h235"),
"string": ("h113", "h235"),
"doublestring": ("h113", "h235"),
"singlestring": ("h113", "h235"),
"docstring": ("h113", "h235"),
"name": ("h192", "h235"),
"punctuation": ("h223", "h235"),
"comment": ("h246", "h235"),
# }}}
# {{{ shell
"command line edit": ("h255", "h233"),
"command line prompt": (add_setting("h192", "bold"), "h233"),
"command line output": ("h80", "h233"),
"command line input": ("h255", "h233"),
"command line error": ("h160", "h233"),
"focused command line output": (add_setting("h192", "bold"), "h24"),
"focused command line input": ("h255", "h24"),
"focused command line error": ("h235", "h24"),
"command line clear button": (add_setting("h255", "bold"), "h233"),
"command line focused button": ("h255", "h24"),
# }}}
})
# }}}
elif theme == "monokai":
        # {{{ monokai
        # Based on the midnight theme, with monokai-style syntax highlighting
        # Looks best in a console with green text against black background
palette_dict.update({
"variables": ("white", "default"),
"var label": ("light blue", "default"),
"var value": ("white", "default"),
"stack": ("white", "default"),
"frame name": ("white", "default"),
"frame class": ("dark blue", "default"),
"frame location": ("light cyan", "default"),
"current frame name": (add_setting("white", "bold"), "default"),
"current frame class": ("dark blue", "default"),
"current frame location": ("light cyan", "default"),
"focused frame name": ("black", "dark green"),
"focused frame class": (add_setting("white", "bold"), "dark green"),
"focused frame location": ("dark blue", "dark green"),
"focused current frame name": ("black", "dark green"),
"focused current frame class": (add_setting("white", "bold"), "dark green"),
"focused current frame location": ("dark blue", "dark green"),
"breakpoint": ("default", "default"),
"search box": ("default", "default"),
"breakpoint": ("white", "default"),
"disabled breakpoint": ("dark gray", "default"),
"focused breakpoint": ("black", "dark green"),
"focused disabled breakpoint": ("dark gray", "dark green"),
"current breakpoint": (add_setting("white", "bold"), "default"),
"disabled current breakpoint": (add_setting("dark gray", "bold"), "default"),
"focused current breakpoint": (add_setting("white", "bold"), "dark green", "bold"),
"focused disabled current breakpoint": (add_setting("dark gray", "bold"), "dark green", "bold"),
"source": ("white", "default"),
"highlighted source": ("white", "light cyan"),
"current source": ("white", "light gray"),
"current focused source": ("white", "brown"),
"line number": ("dark gray", "black"),
"keyword2": ("light cyan", "black"),
"name": ("light green", "black"),
"literal": ("light magenta", "black"),
"namespace": ("light red", "black"),
"operator": ("light red", "black"),
"argument": ("brown", "black"),
"builtin": ("light cyan", "black"),
"pseudo": ("light magenta", "black"),
"dunder": ("light cyan", "black"),
"exception": ("light cyan", "black"),
"keyword": ("light red", "black"),
"string": ("dark red", "default"),
"doublestring": ("dark red", "default"),
"singlestring": ("light blue", "default"),
"docstring": ("light red", "default"),
"backtick": ("light green", "default"),
"punctuation": ("white", "default"),
"comment": ("dark green", "default"),
"classname": ("dark cyan", "default"),
"funcname": ("white", "default"),
"breakpoint marker": ("dark red", "default"),
# {{{ shell
"command line edit": ("white", "default"),
"command line prompt": (add_setting("white", "bold"), "default"),
"command line output": (add_setting("white", "bold"), "default"),
"command line input": (add_setting("white", "bold"), "default"),
"command line error": (add_setting("light red", "bold"), "default"),
"focused command line output": ("black", "dark green"),
"focused command line input": (add_setting("white", "bold"), "dark green"),
"focused command line error": ("black", "dark green"),
"command line clear button": (add_setting("white", "bold"), "default"),
"command line focused button": ("black", "light gray"), # White
# doesn't work in curses mode
# }}}
})
# }}}
elif theme == "monokai-256":
# {{{ monokai-256
palette_dict.update({
"header": ("h235", "h252", "standout"),
# {{{ variables view
"variables": ("h235", "h233"),
"variable separator": ("h23", "h252"),
"var label": ("h111", "h233"),
"var value": ("h255", "h233"),
"focused var label": ("h237", "h172"),
"focused var value": ("h237", "h172"),
"highlighted var label": ("h252", "h22"),
"highlighted var value": ("h255", "h22"),
"focused highlighted var label": ("h252", "h64"),
"focused highlighted var value": ("h255", "h64"),
"return label": ("h113", "h233"),
"return value": ("h113", "h233"),
"focused return label": (add_setting("h192", "bold"), "h24"),
"focused return value": ("h237", "h172"),
# }}}
# {{{ stack view
"stack": ("h235", "h233"),
"frame name": ("h192", "h233"),
"focused frame name": ("h237", "h172"),
"frame class": ("h111", "h233"),
"focused frame class": ("h237", "h172"),
"frame location": ("h252", "h233"),
"focused frame location": ("h237", "h172"),
"current frame name": ("h255", "h22"),
"focused current frame name": ("h255", "h64"),
"current frame class": ("h111", "h22"),
"focused current frame class": ("h255", "h64"),
"current frame location": ("h252", "h22"),
"focused current frame location": ("h255", "h64"),
# }}}
# {{{ breakpoint view
"breakpoint": ("h80", "h233"),
"disabled breakpoint": ("h60", "h233"),
"focused breakpoint": ("h237", "h172"),
"focused disabled breakpoint": ("h182", "h24"),
"current breakpoint": (add_setting("h255", "bold"), "h22"),
"disabled current breakpoint": (add_setting("h016", "bold"), "h22"),
"focused current breakpoint": (add_setting("h255", "bold"), "h64"),
"focused disabled current breakpoint": (add_setting("h016", "bold"), "h64"),
# }}}
# {{{ ui widgets
"selectable": ("h252", "h235"),
"focused selectable": ("h255", "h24"),
"button": ("h252", "h235"),
"focused button": ("h255", "h24"),
"background": ("h235", "h252"),
"hotkey": (add_setting("h235", "underline"), "h252", "underline"),
"focused sidebar": ("h23", "h252", "standout"),
"warning": (add_setting("h255", "bold"), "h124", "standout"),
"label": ("h235", "h252"),
"value": ("h255", "h17"),
"fixed value": ("h252", "h17"),
"group head": (add_setting("h25", "bold"), "h252"),
"search box": ("h255", "h235"),
"search not found": ("h255", "h124"),
"dialog title": (add_setting("h255", "bold"), "h235"),
# }}}
# {{{ source view
"breakpoint marker": ("h160", "h235"),
"breakpoint source": ("h252", "h124"),
"breakpoint focused source": ("h192", "h124"),
"current breakpoint source": ("h192", "h124"),
"current breakpoint focused source": (add_setting("h192", "bold"), "h124"),
# }}}
# {{{ highlighting
"source": ("h255", "h235"),
"focused source": ("h237", "h172"),
"highlighted source": ("h252", "h22"),
"current source": (add_setting("h252", "bold"), "h23"),
"current focused source": (add_setting("h192", "bold"), "h23"),
"current highlighted source": ("h255", "h22"),
"line number": ("h241", "h235"),
"keyword2": ("h51", "h235"),
"name": ("h155", "h235"),
"literal": ("h141", "h235"),
"namespace": ("h198", "h235"),
"operator": ("h198", "h235"),
"argument": ("h208", "h235"),
"builtin": ("h51", "h235"),
"pseudo": ("h141", "h235"),
"dunder": ("h51", "h235"),
"exception": ("h51", "h235"),
"keyword": ("h198", "h235"),
"string": ("h228", "h235"),
"doublestring": ("h228", "h235"),
"singlestring": ("h228", "h235"),
"docstring": ("h243", "h235"),
"punctuation": ("h255", "h235"),
"comment": ("h243", "h235"),
# }}}
# {{{ shell
"command line edit": ("h255", "h233"),
"command line prompt": (add_setting("h192", "bold"), "h233"),
"command line output": ("h80", "h233"),
"command line input": ("h255", "h233"),
"command line error": ("h160", "h233"),
"focused command line output": (add_setting("h192", "bold"), "h24"),
"focused command line input": ("h255", "h24"),
"focused command line error": ("h235", "h24"),
"command line clear button": (add_setting("h255", "bold"), "h233"),
"command line focused button": ("h255", "h24"),
# }}}
})
# }}}
else:
try:
symbols = {
"palette": palette_dict,
"add_setting": add_setting,
}
from os.path import expanduser, expandvars
execfile(expanduser(expandvars(theme)), symbols)
        except Exception:
print("Error when importing theme:")
from traceback import print_exc
print_exc()
raw_input("Hit enter:")
# Apply style inheritance
for child, parent in inheritance_map:
if palette_dict[child] is None:
palette_dict[child] = palette_dict[parent]
palette_list = []
for setting_name, color_values in palette_dict.items():
fg_color = color_values[0].lower().strip()
bg_color = color_values[1].lower().strip()
# Convert hNNN syntax to equivalent #RGB value
# (https://github.com/wardi/urwid/issues/24)
if fg_color.startswith('h') or bg_color.startswith('h'):
attr = urwid.AttrSpec(fg_color, bg_color, colors=256)
palette_list.append((setting_name, 'default', 'default', 'default',
attr.foreground,
attr.background))
else:
palette_list.append((setting_name,) + color_values)
return palette_list
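# Illustrative sketch (not part of pudb) of how the loop above expands an
# "hNNN" 256-color entry into an urwid palette tuple:
#
#   >>> attr = urwid.AttrSpec('h235', 'h252', colors=256)
#   >>> ('header', 'default', 'default', 'default',
#   ...  attr.foreground, attr.background)
#   ('header', 'default', 'default', 'default', 'h235', 'h252')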
# vim: foldmethod=marker
| {
"repo_name": "albfan/pudb",
"path": "pudb/theme.py",
"copies": "1",
"size": "38702",
"license": "mit",
"hash": -246185625876987600,
"line_mean": 38.2913705584,
"line_max": 108,
"alpha_frac": 0.4913441166,
"autogenerated": false,
"ratio": 4.151238871607852,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5142582988207852,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""
The rechunk module defines:
intersect_chunks: a function for
converting chunks to new dimensions
rechunk: a function to convert the blocks
of an existing dask array to new chunks or blockshape
"""
from itertools import count, product, chain
from operator import getitem, add
import numpy as np
from toolz import merge, accumulate
from .core import concatenate3, Array, normalize_chunks
rechunk_names = ('rechunk-%d' % i for i in count(1))
def cumdims_label(chunks, const):
""" Interal utility for cumulative sum with label.
>>> cumdims_label(((5, 3, 3), (2, 2, 1)), 'n') # doctest: +NORMALIZE_WHITESPACE
[(('n', 0), ('n', 5), ('n', 8), ('n', 11)),
(('n', 0), ('n', 2), ('n', 4), ('n', 5))]
"""
    return [tuple(zip((const,) * (1 + len(bds)),
                      list(accumulate(add, (0,) + bds))))
            for bds in chunks]
def _breakpoints(cumold, cumnew):
"""
>>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')
>>> old = cumdims_label(((2, 2, 1), (5,)), 'o')
>>> _breakpoints(new[0], old[0])
(('n', 0), ('o', 0), ('n', 2), ('o', 2), ('o', 4), ('n', 5), ('o', 5))
>>> _breakpoints(new[1], old[1])
(('n', 0), ('o', 0), ('n', 2), ('n', 4), ('n', 5), ('o', 5))
"""
    return tuple(sorted(tuple(cumold) + tuple(cumnew), key=lambda x: x[1]))
def _intersect_1d(breaks):
"""
Internal utility to intersect chunks for 1d after preprocessing.
>>> new = cumdims_label(((2, 3), (2, 2, 1)), 'n')
>>> old = cumdims_label(((2, 2, 1), (5,)), 'o')
>>> _intersect_1d(_breakpoints(old[0], new[0])) # doctest: +NORMALIZE_WHITESPACE
(((0, slice(0, 2, None)),),
((1, slice(0, 2, None)), (2, slice(0, 1, None))))
>>> _intersect_1d(_breakpoints(old[1], new[1])) # doctest: +NORMALIZE_WHITESPACE
(((0, slice(0, 2, None)),),
((0, slice(2, 4, None)),),
((0, slice(4, 5, None)),))
Parameters
----------
breaks: list of tuples
        Each tuple is like ('o', 8) or ('n', 8): a pair of an 'o' (old)
        or 'n' (new) indicator and the corresponding cumulative sum.
        The 'o' and 'n' labels are used to build the tuples of slices
        that map new blocks onto old blocks.
"""
start = 0
last_end = 0
old_idx = 0
    lastbi = ('n', 0)
ret = [[]]
for idx in range(1, len(breaks)):
bi = breaks[idx]
        lastbi = breaks[idx - 1]
if 'n' in lastbi[0] and bi[1]:
ret.append([])
if 'o' in lastbi[0]:
start = 0
else:
start = last_end
end = bi[1] - lastbi[1] + start
last_end = end
if bi[1] == lastbi[1]:
continue
ret[-1].append((old_idx, slice(start, end)))
if bi[0] == 'o':
old_idx += 1
start = 0
return tuple(map(tuple, filter(None, ret)))
def intersect_chunks(old_chunks=None,
new_chunks=None,
shape=None):
"""
Make dask.array slices as intersection of old and new chunks.
>>> intersect_chunks(((4, 4), (2,)),
... ((8,), (1, 1))) # doctest: +NORMALIZE_WHITESPACE
((((0, slice(0, 4, None)), (0, slice(0, 1, None))),
((1, slice(0, 4, None)), (0, slice(0, 1, None)))),
(((0, slice(0, 4, None)), (0, slice(1, 2, None))),
((1, slice(0, 4, None)), (0, slice(1, 2, None)))))
Parameters
----------
old_chunks : iterable of tuples
block sizes along each dimension (convert from old_chunks)
new_chunks: iterable of tuples
block sizes along each dimension (converts to new_chunks)
shape : tuple of ints
        Shape of the entire array. Only needed when ``old_chunks`` or
        ``new_chunks`` are given as block shapes rather than fully
        specified per-dimension chunk tuples.
"""
old_chunks = normalize_chunks(old_chunks, shape)
new_chunks = normalize_chunks(new_chunks, shape)
    cmo = cumdims_label(old_chunks, 'o')
    cmn = cumdims_label(new_chunks, 'n')
    sums = [sum(o) for o in old_chunks]
    sums2 = [sum(n) for n in new_chunks]
    if sums != sums2:
        raise ValueError('Cannot change dimensions from %r to %r'
                         % (sums, sums2))
    old_to_new = tuple(_intersect_1d(_breakpoints(cm[0], cm[1])) for cm in zip(cmo, cmn))
cross1 = tuple(product(*old_to_new))
cross = tuple(chain(tuple(product(*cr)) for cr in cross1))
return cross
def blockdims_dict_to_tuple(old, new):
"""
>>> blockdims_dict_to_tuple((4, 5, 6), {1: 10})
(4, 10, 6)
"""
newlist = list(old)
for k, v in new.items():
newlist[k] = v
return tuple(newlist)
def blockshape_dict_to_tuple(old_chunks, d):
"""
>>> blockshape_dict_to_tuple(((4, 4), (5, 5)), {1: 3})
((4, 4), (3, 3, 3, 1))
"""
shape = tuple(map(sum, old_chunks))
new_chunks = list(old_chunks)
for k, v in d.items():
div = shape[k] // v
mod = shape[k] % v
new_chunks[k] = (v,) * div + ((mod,) if mod else ())
return tuple(new_chunks)
def rechunk(x, chunks):
"""
Convert blocks in dask array x for new chunks.
>>> import dask.array as da
>>> a = np.random.uniform(0, 1, 7**4).reshape((7,) * 4)
>>> x = da.from_array(a, chunks=((2, 3, 2),)*4)
>>> x.chunks
((2, 3, 2), (2, 3, 2), (2, 3, 2), (2, 3, 2))
>>> y = rechunk(x, chunks=((2, 4, 1), (4, 2, 1), (4, 3), (7,)))
>>> y.chunks
((2, 4, 1), (4, 2, 1), (4, 3), (7,))
chunks also accept dict arguments mapping axis to blockshape
>>> y = rechunk(x, chunks={1: 2}) # rechunk axis 1 with blockshape 2
Parameters
----------
x: dask array
chunks: the new block dimensions to create
"""
if isinstance(chunks, dict):
if not chunks or isinstance(next(iter(chunks.values())), int):
chunks = blockshape_dict_to_tuple(x.chunks, chunks)
else:
chunks = blockdims_dict_to_tuple(x.chunks, chunks)
chunks = normalize_chunks(chunks, x.shape)
if not len(chunks) == x.ndim or tuple(map(sum, chunks)) != x.shape:
raise ValueError("Provided chunks are not consistent with shape")
crossed = intersect_chunks(x.chunks, chunks)
x2 = dict()
temp_name = next(rechunk_names)
new_index = tuple(product(*(tuple(range(len(n))) for n in chunks)))
for flat_idx, cross1 in enumerate(crossed):
new_idx = new_index[flat_idx]
key = (temp_name,) + new_idx
cr2 = iter(cross1)
        old_blocks = tuple(tuple(ind for ind, _ in cr) for cr in cross1)
        subdims = tuple(len(set(ss[i] for ss in old_blocks)) for i in range(x.ndim))
        rec_cat_arg = np.empty(subdims).tolist()
inds_in_block = product(*(range(s) for s in subdims))
for old_block in old_blocks:
ind_slics = next(cr2)
old_inds = tuple(tuple(s[0] for s in ind_slics) for i in range(x.ndim))
# list of nd slices
slic = tuple(tuple(s[1] for s in ind_slics) for i in range(x.ndim))
ind_in_blk = next(inds_in_block)
temp = rec_cat_arg
            for i in range(x.ndim - 1):
temp = getitem(temp, ind_in_blk[i])
for ind, slc in zip(old_inds, slic):
temp[ind_in_blk[-1]] = (getitem, (x.name,) + ind, slc)
x2[key] = (concatenate3, rec_cat_arg)
x2 = merge(x.dask, x2)
return Array(x2, temp_name, chunks, dtype=x.dtype)
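# Rough sketch of the graph entries built above (keys and slices are
# illustrative, not actual dask output): each new block key maps to a
# concatenate3 call over nested lists of getitem tasks that slice the
# old blocks, e.g.
#
#   ('rechunk-1', 0, 0): (concatenate3,
#       [[(getitem, ('x', 0, 0), (slice(0, 2), slice(0, 4))),
#         (getitem, ('x', 0, 1), (slice(0, 2), slice(0, 4)))]])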
| {
"repo_name": "marianotepper/dask",
"path": "dask/array/rechunk.py",
"copies": "2",
"size": "7667",
"license": "bsd-3-clause",
"hash": 6063678160818981000,
"line_mean": 32.6271929825,
"line_max": 89,
"alpha_frac": 0.5488457024,
"autogenerated": false,
"ratio": 3.1668731928954976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4715718895295497,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# Third-party
import numpy as np
from numpy.testing import assert_array_equal
# Package
from glue.core import data_factories as df
def test_npy_load(tmpdir):
data = np.array([("a",152.2352,-21.513), ("b",21.412,35.1341)],
dtype=[('name','|S1'),('ra','f8'),('dec','f8')])
with open(tmpdir.join('test.npy').strpath, 'wb') as f:
np.save(f, data)
f.seek(0)
data2 = df.load_data(f.name)
assert_array_equal(data['name'], data2.get_component('name').labels)
assert_array_equal(data['ra'], data2['ra'])
assert_array_equal(data['dec'], data2['dec'])
def test_npz_load(tmpdir):
data1 = np.array([("a",152.2352,-21.513), ("b",21.412,35.1341)],
dtype=[('name','|S1'),('ra','f8'),('dec','f8')])
data2 = np.array([("c",15.2352,-2.513), ("d",2.412,3.1341)],
dtype=[('name','|S1'),('l','f8'),('b','f8')])
with open(tmpdir.join('test.npz').strpath, 'wb') as f:
np.savez(f, data1=data1, data2=data2)
f.seek(0)
data_loaded = df.load_data(f.name)
arr = data_loaded[0]
assert_array_equal(data1['name'], arr.get_component('name').labels)
assert_array_equal(data1['ra'], arr['ra'])
assert_array_equal(data1['dec'], arr['dec'])
arr = data_loaded[1]
assert_array_equal(data2['name'], arr.get_component('name').labels)
assert_array_equal(data2['l'], arr['l'])
assert_array_equal(data2['b'], arr['b'])
| {
"repo_name": "saimn/glue",
"path": "glue/core/data_factories/tests/test_numpy.py",
"copies": "2",
"size": "1569",
"license": "bsd-3-clause",
"hash": 4988003828737545000,
"line_mean": 34.6590909091,
"line_max": 76,
"alpha_frac": 0.5595920969,
"autogenerated": false,
"ratio": 3.0173076923076922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45768997892076924,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
"""
This module contains some example ``Rollup`` objects, each implementing the
interface expected by the Manhattan backend for rollup aggregations.
"""
import time
from datetime import datetime, timedelta
import pytz
class LocalRollup(object):
def __init__(self, tzname):
self.tz = pytz.timezone(tzname)
def start_date_for(self, timestamp):
dt = datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.utc)
dt_local = dt.astimezone(self.tz).replace(tzinfo=None)
return dt_local.date()
class LocalDayRollup(LocalRollup):
def get_bucket(self, timestamp, history):
return time.mktime(self.start_date_for(timestamp).timetuple())
class LocalWeekRollup(LocalRollup):
def get_bucket(self, timestamp, history):
day = self.start_date_for(timestamp)
days_from_sunday = day.isoweekday() % 7
day -= timedelta(days=days_from_sunday)
return time.mktime(day.timetuple())
class AllRollup(object):
def get_bucket(self, timestamp, history):
return 0
class BrowserRollup(object):
def browser_from_user_agent(self, user_agent):
        # FIXME: This is a pretty naive and not especially robust implementation.
if 'Chrome' in user_agent:
return u'Chrome'
elif 'Safari' in user_agent:
return u'Safari'
elif 'Firefox' in user_agent:
return u'Firefox'
elif 'MSIE' in user_agent:
return u'IE'
else:
return u'Unknown'
def get_bucket(self, timestamp, history):
return (history.user_agents and
self.browser_from_user_agent(list(history.user_agents)[0]) or
u'')
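# Illustrative note (not part of the original module): get_bucket above uses
# the `x and f(x) or default` idiom, so an empty user_agents collection
# buckets to u''. A minimal sketch, assuming a history object exposing a
# user_agents set:
#
#   class _History(object):
#       user_agents = set(['Mozilla/5.0 ... Chrome/45.0 ...'])
#   BrowserRollup().get_bucket(0, _History())  # -> u'Chrome'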
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/backend/rollups.py",
"copies": "1",
"size": "1756",
"license": "mit",
"hash": -3280859895393880000,
"line_mean": 26.873015873,
"line_max": 77,
"alpha_frac": 0.6514806378,
"autogenerated": false,
"ratio": 3.834061135371179,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4985541773171179,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# This should be moved to llvmpy
#
# There are different array kinds parameterized by eltype and nd
#
# Contiguous or Fortran
# struct {
# eltype *data;
# intp shape[nd];
# void *meta;
# } contiguous_array(eltype, nd)
#
# struct {
# eltype *data;
# intp shape[nd];
# intp stride[nd];
# void *meta;
# } strided_array(eltype, nd)
#
# struct {
# eltype *data;
# diminfo shape[nd];
# void *meta;
# } new_strided_array(eltype, nd)
#
# struct {
# intp dim;
# intp stride;
# } diminfo
#
# These are for low-level array-routines that need to know the number
# of dimensions at run-time (not just code-generation time):
#
# The first two are recommended
#
# struct {
# eltype *data;
# int32 nd;
# intp shape[nd];
# void *meta;
# } contiguous_array_nd(eltype)
#
# struct {
# eltype *data;
# int32 nd;
# intp shape[nd];
# intp stride[nd];
# void *meta;
# } strided_array_nd(eltype)
#
# struct {
# eltype *data;
# int32 nd;
# diminfo shape[nd];
# void *meta;
# } new_strided_array_nd(eltype)
#
#
#
#
#
# The most general (where the kind of array is stored as well as number
# of dimensions)
# Rarely needed.
#
# struct {
# eltype *data;
# int16 nd;
# int16 dimkind;
# ???
# void *meta;
# } array_nd(eltype)
#
# where ??? is run-time interpreted based on the dimkind to either:
# intp shape[nd]; for dimkind = C_CONTIGUOUS or F_CONTIGUOUS
#
# diminfo shape[nd]; for dimkind = NEW_STRIDED
#
# intp shape[nd];
# intp strides[nd]; for dimkind = STRIDED
#
# Array_C --- C Contiguous
# Array_F --- Fortran Contiguous
# Array_S --- Strided
# Array_CS --- Contiguous in last dimension and strided in others (same layout as Array_S)
# Array_FS --- Contiguous in first dimension and strided in others (same layout as Array_S)
# For 1-d, Array_C, Array_F, Array_CS, and Array_FS behave identically.
import llvm.core as lc
from llvm.core import Type, Constant
import llvm_cbuilder.shortnames as C
from ..py2help import reduce
# Non-array parameter types
SCALAR = 0
POINTER = 1
# Different Array Types
ARRAYBIT = 1<<4
C_CONTIGUOUS = ARRAYBIT + 0
F_CONTIGUOUS = ARRAYBIT + 1
STRIDED = ARRAYBIT + 2
NEW_STRIDED = ARRAYBIT + 3
HAS_ND = 1<<5
C_CONTIGUOUS_ND = C_CONTIGUOUS + HAS_ND
F_CONTIGUOUS_ND = F_CONTIGUOUS + HAS_ND
STRIDED_ND = STRIDED + HAS_ND
NEW_STRIDED_ND = NEW_STRIDED + HAS_ND
HAS_DIMKIND = 1<<6
C_CONTIGUOUS_DK = C_CONTIGUOUS + HAS_DIMKIND
F_CONTIGUOUS_DK = F_CONTIGUOUS + HAS_DIMKIND
STRIDED_DK = STRIDED + HAS_DIMKIND
NEW_STRIDED_DK = NEW_STRIDED + HAS_DIMKIND
# The first three are the most common --- the others are not typically supported
# STRIDED is basically the equivalent of NumPy
array_kinds = (C_CONTIGUOUS, F_CONTIGUOUS, STRIDED, NEW_STRIDED,
C_CONTIGUOUS_ND, F_CONTIGUOUS_ND, STRIDED_ND, NEW_STRIDED_ND,
C_CONTIGUOUS_DK, F_CONTIGUOUS_DK, STRIDED_DK, NEW_STRIDED_DK)
_invmap = {}
def kind_to_str(kind):
global _invmap
if not _invmap:
for key, value in globals().items():
if isinstance(value, int) and value in array_kinds:
_invmap[value] = key
return _invmap[kind]
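# Illustrative sketch (not in the original source): the kind constants are
# small integer flags, so kinds compose by addition and decompose by masking,
# and kind_to_str/str_to_kind round-trip between names and values:
#
#   kind_to_str(STRIDED_ND)                          # -> 'STRIDED_ND'
#   str_to_kind('STRIDED_ND') == STRIDED_ND          # -> True
#   STRIDED_ND & ~(HAS_ND | HAS_DIMKIND) == STRIDED  # -> True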
def str_to_kind(name):
    trial = eval(name)
    if trial not in array_kinds:
        raise ValueError("Invalid Array Kind")
    return trial
void_type = C.void
int32_type = C.int32
char_type = C.char
int16_type = C.int16
intp_type = C.intp
int_type = C.int
char_p_type = lc.Type.pointer(C.char)
void_p_type = C.void_p
diminfo_type = Type.struct([intp_type, # shape
intp_type # stride
], name='diminfo')
zero_p = lc.Constant.int(intp_type, 0)
one_p = lc.Constant.int(intp_type, 1)
# We use a per-module cache because the LLVM linker wants a new struct
# with the same name in different modules.
# The linker does *not* like the *same* struct with the *same* name in
# two different modules.
_cache = {}
# This is the way we define LLVM arrays.
# C_CONTIGUOUS, F_CONTIGUOUS, and STRIDED are strongly encouraged...
def array_type(nd, kind, el_type=char_type, module=None):
base = kind & (~(HAS_ND | HAS_DIMKIND))
if base == C_CONTIGUOUS:
dimstr = 'Array_C'
elif base == F_CONTIGUOUS:
dimstr = 'Array_F'
elif base == STRIDED:
dimstr = 'Array_S'
elif base == NEW_STRIDED:
dimstr = 'Array_N'
else:
raise TypeError("Do not understand Array kind of %d" % kind)
if (kind & HAS_ND):
dimstr += '_ND'
elif (kind & HAS_DIMKIND):
dimstr += '_DK'
key = "%s_%s_%d" % (dimstr, str(el_type), nd)
if module is not None:
modcache = _cache.setdefault(module.id,{})
if key in modcache:
return modcache[key]
terms = [Type.pointer(el_type)] # data
if (kind & HAS_ND):
terms.append(int32_type) # nd
elif (kind & HAS_DIMKIND):
terms.extend([int16_type, int16_type]) # nd, dimkind
if base in [C_CONTIGUOUS, F_CONTIGUOUS]:
terms.append(Type.array(intp_type, nd)) # shape
elif base == NEW_STRIDED:
terms.append(Type.array(diminfo_type, nd)) # diminfo
elif base == STRIDED:
terms.extend([Type.array(intp_type, nd), # shape
Type.array(intp_type, nd)]) # strides
terms.append(void_p_type)
ret = Type.struct(terms, name=key)
if module is not None:
modcache[key] = ret
return ret
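# Illustrative sketch (not in the original source), assuming llvmpy is
# available: a 2-d strided array of the default char element maps to a
# struct of a data pointer, shape and strides arrays, and a void* meta slot.
#
#   ty = array_type(2, STRIDED)
#   # ty is roughly {i8*, [2 x intp], [2 x intp], i8*}, named 'Array_S_i8_2',
#   # and check_array(ty) returns (STRIDED, 2, char_type).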
def check_array(arrtyp):
"""Converts an LLVM type into an llvm_array 'kind' for a
blaze kernel to use.
Parameters
----------
arrtyp : LLVM type
The LLVM type to convert into a 'kind'. This type should
have been created with the array_type function.
Returns
-------
None if the input parameter is not an array_type instance,
or a 3-tuple (array_kind, ndim, llvm_eltype). The array_kind
is an integer flags containing values like C_CONTIGUOUS, HAS_ND,
etc.
"""
if not isinstance(arrtyp, lc.StructType):
return None
if arrtyp.element_count not in [3, 4, 5, 6]:
return None
    # _cache maps module ids to per-module {name: type} dicts, so the cached
    # key can't recover (kind, nd, eltype) directly; use the manual check.
    return _raw_check_array(arrtyp)
# Manual check
def _raw_check_array(arrtyp):
a0 = arrtyp.elements[0]
a1 = arrtyp.elements[1]
if not isinstance(a0, lc.PointerType) or \
not (isinstance(a1, lc.ArrayType) or
(a1 == int32_type) or (a1 == int16_type)):
return None
if not (arrtyp.elements[-1] == void_p_type):
return None
data_type = a0.pointee
if arrtyp.is_literal:
c_contig = True
else:
if arrtyp.name.startswith('Array_F'):
c_contig = False
else:
c_contig = True
if a1 == int32_type:
num = 2
strided = STRIDED_ND
new_strided = NEW_STRIDED_ND
c_contiguous = C_CONTIGUOUS_ND
f_contiguous = F_CONTIGUOUS_ND
elif a1 == int16_type:
if arrtyp.element_count < 3 or arrtyp.elements[2] != int16_type:
return None
num = 3
strided = STRIDED_DK
new_strided = NEW_STRIDED_DK
c_contiguous = C_CONTIGUOUS_DK
f_contiguous = F_CONTIGUOUS_DK
else:
num = 1
strided = STRIDED
new_strided = NEW_STRIDED
c_contiguous = C_CONTIGUOUS
f_contiguous = F_CONTIGUOUS
# otherwise we have lc.ArrType as element [1]
if arrtyp.element_count not in [num+2,num+3]:
return None
s1 = arrtyp.elements[num]
nd = s1.count
# Strided case
if arrtyp.element_count == num+3:
if not isinstance(arrtyp.elements[num+1], lc.ArrayType):
return None
s2 = arrtyp.elements[num+1]
if s1.element != intp_type or s2.element != intp_type:
return None
if s1.count != s2.count:
return None
return strided, nd, data_type
if s1.element == diminfo_type:
return new_strided, nd, data_type
elif s1.element == intp_type:
return c_contiguous if c_contig else f_contiguous, nd, data_type
else:
return None
# Returns c++ templates for Array_S, Array_F, Array_C, Array_N...
_template_cache = []
def get_cpp_template(typ='all'):
if len(_template_cache) == 0:
_make_cpp_templates()
templates = _template_cache
if typ in array_kinds:
indx = array_kinds.index(typ)
try:
if (typ & HAS_DIMKIND):
base = [templates[0], templates[2]]
elif (typ & HAS_ND):
base = [templates[0], templates[1]]
else:
base = [templates[0]]
if (typ & (~(HAS_ND | HAS_DIMKIND))) == NEW_STRIDED:
base.append(templates[3])
except TypeError:
base = templates[:4]
return '\n'.join(base+[templates[indx+4]])
    else:
        return '\n'.join(templates)
# Warning! This assumes that clang on the system
# has the same architecture as ctypes...
def _make_cpp_templates():
global _template_cache
_template_cache = []
import ctypes
plen = ctypes.sizeof(ctypes.c_size_t)
spaces = ' '*4
lsize = ctypes.sizeof(ctypes.c_long)
isize = ctypes.sizeof(ctypes.c_int)
llsize = ctypes.sizeof(ctypes.c_longlong)
shsize = ctypes.sizeof(ctypes.c_short)
if plen == lsize:
header = "%stypedef long intp;" % spaces
elif plen == isize:
header = "%stypdef int intp;" % spaces
elif plen == llsize:
header = "%stypedef longlong intp;" % spaces
else:
raise ValueError("Size of pointer not recognized.")
if lsize == 4:
header2 = "%stypedef long int32;" % spaces
elif isize == 4:
header2 = "%stypedef int int32;" % spaces
    else:
        raise ValueError("Cannot find typedef for 32-bit int.")
    if isize == 2:
        header3 = "%stypedef int int16;" % spaces
    elif shsize == 2:
        header3 = "%stypedef short int16;" % spaces
    else:
        raise ValueError("Cannot find typedef for 16-bit int.")
template_core = """
template<class T, int ndim>
struct {name} {{
T *data;
{middle}{dims};
void *meta;
}};
"""
header4 = """
template<class T>
struct diminfo {
T dim;
T stride;
};
"""
spaces = ' '*8
middle_map = {'': '',
'ND': 'int32 nd;\n%s' % spaces,
'DK': 'int16 nd;\n%sint16 dimkind;\n%s' % (spaces, spaces)
}
dims_map = {'F': 'intp dims[ndim]',
'C': 'intp dims[ndim]',
'N': 'diminfo<intp> dims[ndim]',
'S': 'intp dims[ndim];\n%sintp strides[ndim]' % spaces
}
templates = [header, header2, header3, header4]
for end in ['', 'ND', 'DK']:
for typ in ['C', 'F', 'S', 'N']:
name = '_'.join(['Array_%s' % typ]+([end] if end else []))
templates.append(template_core.format(name=name,
middle=middle_map[end],
dims=dims_map[typ]))
_template_cache.extend(templates)
return
zero_i = lc.Constant.int(int_type, 0)
one_i = lc.Constant.int(int_type, 1)
two_i = lc.Constant.int(int_type, 2)
three_i = lc.Constant.int(int_type, 3)
four_i = lc.Constant.int(int_type, 4)
def const_intp(value):
return Constant.int_signextend(intp_type, value)
def ptr_at(builder, ptr, idx):
return builder.gep(ptr, [auto_const_intp(idx)])
def load_at(builder, ptr, idx):
return builder.load(ptr_at(builder, ptr, idx))
def store_at(builder, ptr, idx, val):
builder.store(val, ptr_at(builder, ptr, idx))
def get_data_ptr(builder, arrptr):
val = builder.gep(arrptr, [zero_p, zero_i])
return builder.load(val)
def get_shape_ptr(builder, arrptr):
val = builder.gep(arrptr, [zero_p, one_i, zero_p])
return val
def get_strides_ptr(builder, arrptr):
return builder.gep(arrptr, [zero_p, two_i, zero_p])
def auto_const_intp(v):
if hasattr(v, '__index__'):
return const_intp(v.__index__())
else:
return v
# Assumes that unpacked structures with elements all of width > 32bits
# are the same as packed structures --- possibly not true on every platform.
def _sizeof(_eltype, unpacked=False):
msg = "Cannot determine size of unpacked structure with elements of size %d"
kind = _eltype.kind
if kind == lc.TYPE_INTEGER:
width = _eltype.width
if width % 8 != 0:
raise ValueError("Invalid bit-width on Integer")
if unpacked and width < 32:
raise ValueError(msg % width)
return width >> 3
elif kind == lc.TYPE_POINTER:
return intp_type.width >> 3
elif kind == lc.TYPE_FLOAT:
return 4
elif kind == lc.TYPE_DOUBLE:
return 8
elif kind == lc.TYPE_HALF:
if unpacked:
raise ValueError(msg % 2)
return 2
elif kind == lc.TYPE_FP128:
return 16
elif kind == lc.TYPE_ARRAY:
return _eltype.count * _sizeof(_eltype.element)
elif kind == lc.TYPE_STRUCT:
return sum(_sizeof(element, not _eltype.packed)
for element in _eltype.elements)
raise ValueError("Unimplemented type % s (kind=%s)" % (_eltype, kind))
orderchar = {C_CONTIGUOUS:'C',
F_CONTIGUOUS:'F',
STRIDED: 'S'}
kindfromchar = {}
for key, value in orderchar.items():
kindfromchar[value] = key
# Return the sizeof an LLVM-Type as a runtime value
# This will become a constant at run-time...
def sizeof(llvm_type, builder):
nullval = Constant.null(Type.pointer(llvm_type))
size = builder.gep(nullval, [one_i])
sizeI = builder.bitcast(size, int_type)
return sizeI
# Return the byte offset of a fieldnumber in a struct
# This will become a constant at run-time...
def offsetof(struct_type, fieldnum, builder):
nullval = Constant.null(Type.pointer(struct_type))
if hasattr(fieldnum, '__index__'):
fieldnum = fieldnum.__index__()
fieldnum = Constant.int(int_type, fieldnum)
offset = builder.gep(nullval, [zero_p, fieldnum])
offsetI = builder.bitcast(offset, int_type)
return offsetI
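# Illustrative note (not in the original source): both sizeof() and
# offsetof() use the classic GEP-on-null trick: the address one element past
# a null pointer equals the type's size, and the address of field N within a
# struct at null equals that field's byte offset. The optimizer folds both
# into constants.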
LLVM_SCALAR = [lc.TYPE_HALF, lc.TYPE_FP128,
lc.TYPE_DOUBLE, lc.TYPE_INTEGER, lc.TYPE_FLOAT]
# Embed an llvmvalue into an Array_S array with dimension nd
# by setting strides of new dimensions to 0.
# return the embedded array as an LLArray
# preload the shape and strides of the new array if preload
def embed(builder, llvmval, nd, preload=True):
    # Not implemented yet; the code below the raise is an unreachable
    # sketch (new_ptr is never defined).
    raise NotImplementedError
kind = llvmval.type.kind
if kind in LLVM_SCALAR:
pass
if preload:
builder_val = builder
else:
builder_val = None
return LLArray(new_ptr, builder_val)
def isinteger(val):
if hasattr(val, '__index__'):
return True
if isinstance(val, lc.Value) and val.type.kind == lc.TYPE_INTEGER:
return True
return False
def isiterable(val):
    try:
        from collections.abc import Iterable  # Python 3.3+
    except ImportError:
        from collections import Iterable  # Python 2
    return isinstance(val, Iterable)
# An Array object wrapper around pointer to low-level LLVM array.
# allows pre-loading of strides, shape, so that
# slicing and element-access computation happens at Python level
# without un-necessary memory-access (loading)
# If builder is set during creation, then pre-load
# Otherwise, attach builder later as attribute
class LLArray(object):
_strides_ptr = None
_strides = None
_shape_ptr = None
_shape = None
_data_ptr = None
_freefuncs = []
_freedata = []
_builder_msg = "The builder attribute is not set."
def __init__(self, array_ptr, builder=None):
self.builder = builder
self.array_ptr = array_ptr
self.array_type = array_ptr.type.pointee
kind, nd, eltype = check_array(self.array_type)
self._kind = kind
self.nd = nd
self._eltype = eltype
try:
self._order = orderchar[kind]
except KeyError:
raise ValueError("Unsupported array type %s" % kind)
self._itemsize = _sizeof(eltype)
if builder is not None:
_ = self.strides # execute property codes to pre-load
_ = self.shape
_ = self.data
@property
def strides(self):
if self._kind != STRIDED:
return None
if not self._strides:
if self.builder is None:
raise ValueError(self._builder_msg)
if self.nd > 0:
self._strides_ptr = get_strides_ptr(self.builder, self.array_ptr)
self._strides = self.preload(self._strides_ptr)
return self._strides
@property
def shape(self):
if not self._shape:
if self.builder is None:
raise ValueError(self._builder_msg)
if self.nd > 0:
self._shape_ptr = get_shape_ptr(self.builder, self.array_ptr)
self._shape = self.preload(self._shape_ptr)
return self._shape
@property
def data(self):
if not self._data_ptr:
if self.builder is None:
raise ValueError(self._builder_msg)
self._data_ptr = get_data_ptr(self.builder, self.array_ptr)
return self._data_ptr
@property
def itemsize(self):
if self.builder:
return sizeof(self._eltype, self.builder)
else:
return Constant.int(int_type, self._itemsize)
@property
def module(self):
return self.builder.basic_block.function.module if self.builder else None
def preload(self, llarray_ptr, count=None):
if llarray_ptr is None:
return None
if count is None:
count = self.nd
return [load_at(self.builder, llarray_ptr, i) for i in range(count)]
def getview(self, nd=None, kind=None, eltype=None):
newtype = array_type(self.nd if nd is None else nd,
self._kind if kind is None else kind,
self._eltype if eltype is None else eltype,
self.module)
new = self.builder.alloca(newtype)
return LLArray(new)
def getptr(self, *indices):
assert len(indices) == self.nd
indices = [auto_const_intp(x) for x in indices]
shape = self.shape
strides = self.strides
order = self._order
data = self._data_ptr
builder = self.builder
intp = intp_type
if self.nd == 0:
ptr = builder.gep(data, [zero_p])
elif order in 'CF':
# optimize for C and F contiguous
if order == 'F':
shape = list(reversed(shape))
loc = Constant.null(intp)
for ival, sval in zip(indices, shape[1:]):
tmp = builder.mul(ival, sval)
loc = builder.add(loc, tmp)
loc = builder.add(loc, indices[-1])
ptr = builder.gep(data, [loc])
else:
# any order
loc = Constant.null(intp)
for i, s in zip(indices, strides):
tmp = builder.mul(i, s)
loc = builder.add(loc, tmp)
base = builder.ptrtoint(data, intp)
target = builder.add(base, loc)
ptr = builder.inttoptr(target, data.type)
return ptr
#Can be used to get subarrays as well as elements
def __getitem__(self, key):
from .llgetitem import from_Array
isiter = isiterable(key)
char = orderchar[self._kind]
# full-indexing
if (isiter and len(key) == self.nd) or \
(self.nd == 1 and isinteger(key)):
if isiter:
if any(x in [Ellipsis, None] for x in key):
return from_Array(self, key, char)
else:
args = key
else:
args = (key.__index__(),)
ptr = self.getptr(*args)
return self.builder.load(ptr)
elif self._kind in [C_CONTIGUOUS, F_CONTIGUOUS, STRIDED]:
return from_Array(self, key, char)
else:
raise NotImplementedError
# Could use memcopy and memmove to implement full slicing capability
# But for now just works for single element
def __setitem__(self, key, value):
        if not isinstance(key, tuple) or len(key) != self.nd:
            raise NotImplementedError
ptr = self.getptr(*key)
self.builder.store(value, ptr)
# Static alloca the structure and malloc the data
# There is no facility for ensuring lifetime of the memory
# So this array should *not* be used in another thread
# shape is a Python list of integers or llvm ints
# eltype is an llvm type
# This is intended for temporary use only.
def create(self, shape=None, kind=None, eltype=None, malloc=None, free=None, order='K'):
res = create_array(self.builder, shape or self.shape,
kind or self._kind,
eltype or self._eltype,
malloc, free, order)
        new, freefunc, char_data = res
        self._freefuncs.append(freefunc)
        self._freedata.append(char_data)
return LLArray(new)
def _dealloc(self):
for freefunc, freedatum in zip(self._freefuncs, self._freedata):
self.builder.call(freefunc, [freedatum])
self._freefuncs = []
self._freedata = []
# Static alloca the structure and malloc the data
# There is no facility for ensuring lifetime of the memory
# So this array should *not* be used in another thread
# shape is a Python list of integers or llvm ints
# eltype is an llvm type
# This is intended for temporary use only.
def create_array(builder, shape, kind, eltype, malloc=None, free=None, order='K'):
import operator
if malloc is None:
malloc, free = _default_malloc_free(builder.basic_block.function.module)
nd = len(shape)
newtype = array_type(nd, kind, eltype, builder.basic_block.function.module)
new = builder.alloca(newtype)
shape_ptr = get_shape_ptr(builder, new)
# Store shape
for i, val in enumerate(shape):
store_at(builder, shape_ptr, i, auto_const_intp(val))
# if shape is all integers then we can pre-multiply to get size.
# Otherwise, we will have to compute the size in the code.
if all(hasattr(x, '__index__') for x in shape):
shape = [x.__index__() for x in shape]
total = reduce(operator.mul, shape, _sizeof(eltype))
arg = Constant.int(intp_type, total)
precompute=True
else:
precompute=False
result = sizeof(eltype, builder)
for val in shape:
result = builder.mul(result, auto_const_intp(val))
arg = result
char_data = builder.call(malloc, [arg])
data = builder.bitcast(char_data, Type.pointer(eltype))
data_ptr = builder.gep(new, [zero_p, zero_i])
builder.store(data, data_ptr)
if kind == STRIDED:
# Fill the strides array depending on order
        if order == 'K':
            # If it's strided, we should arguably choose based on whether
            # the first or last stride is larger; for now just use 'C'.
            order = 'C'
if order == 'C':
range2 = range(nd-1, -1, -1)
func = operator.sub
elif order == 'F':
range2 = range(0, nd, 1)
func = operator.add
else:
raise ValueError("Invalid order given")
range1 = range2[:-1]
range3 = [func(v, 1) for v in range1]
strides_ptr = get_strides_ptr(builder, new)
        if precompute:
            strides_list = [_sizeof(eltype)]
            value = strides_list[0]
            for index in range1:
                value = value * shape[index]
                strides_list.append(value)
            for stride, index in zip(strides_list, range2):
                sval = Constant.int(intp_type, stride)
                store_at(builder, strides_ptr, index, sval)
        else:
            sval = Constant.int(intp_type, _sizeof(eltype))
            store_at(builder, strides_ptr, range1[0], sval)
            for sh_index, st_index in zip(range1, range3):
                val = load_at(builder, shape_ptr, sh_index)
                sval = builder.mul(sval, val)
                store_at(builder, strides_ptr, st_index, sval)
return new, free, char_data
malloc_sig = lc.Type.function(char_p_type, [intp_type])
free_sig = lc.Type.function(void_type, [char_p_type])
def _default_malloc_free(mod):
malloc = mod.get_or_insert_function(malloc_sig, 'malloc')
free = mod.get_or_insert_function(free_sig, 'free')
return malloc, free
def test():
arr = array_type(5, C_CONTIGUOUS)
assert check_array(arr) == (C_CONTIGUOUS, 5, char_type)
arr = array_type(4, STRIDED)
assert check_array(arr) == (STRIDED, 4, char_type)
arr = array_type(3, NEW_STRIDED)
assert check_array(arr) == (NEW_STRIDED, 3, char_type)
if __name__ == '__main__':
test()
| {
"repo_name": "xsixing/blaze",
"path": "blaze/compute/llvm_array.py",
"copies": "2",
"size": "25281",
"license": "bsd-3-clause",
"hash": 6182586962852621000,
"line_mean": 31.1641221374,
"line_max": 92,
"alpha_frac": 0.5919069657,
"autogenerated": false,
"ratio": 3.431654676258993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5023561641958992,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from collections import defaultdict
import datetime
import itertools
import enum
from psycopg2.extensions import AsIs, ISQLQuote
import pytz
import nfldb.category
from nfldb.db import now, Tx
import nfldb.sql as sql
import nfldb.team
__pdoc__ = {}
def _stat_categories():
"""
Returns a `collections.OrderedDict` of all statistical categories
available for play-by-play data.
"""
cats = OrderedDict()
for row in nfldb.category.categories:
cat_type = Enums.category_scope[row[2]]
cats[row[3]] = Category(row[3], row[0], cat_type, row[1], row[4])
return cats
def _nflgame_start_time(schedule):
"""
Given an entry in `nflgame.schedule`, return the start time of the
game in UTC.
"""
    # Hack to get around ambiguous times for weird London games.
if schedule['eid'] == '2015100400':
d = datetime.datetime(2015, 10, 4, 9, 30)
return pytz.timezone('US/Eastern').localize(d).astimezone(pytz.utc)
elif schedule['eid'] == '2015102500':
d = datetime.datetime(2015, 10, 25, 9, 30)
return pytz.timezone('US/Eastern').localize(d).astimezone(pytz.utc)
elif schedule['eid'] == '2015110100':
d = datetime.datetime(2015, 11, 1, 9, 30)
return pytz.timezone('US/Eastern').localize(d).astimezone(pytz.utc)
# Year is always the season, so we bump it if the month is Jan-March.
year, month, day = schedule['year'], schedule['month'], schedule['day']
if 1 <= schedule['month'] <= 3:
year += 1
# BUG: Getting the hour here will be wrong if a game starts before Noon
# EST. Not sure what to do about it...
hour, minute = schedule['time'].strip().split(':')
minute = int(minute)
if hour == '12':
hour = 12
else:
hour = (int(hour) + 12) % 24
d = datetime.datetime(year, month, day, hour, minute)
return pytz.timezone('US/Eastern').localize(d).astimezone(pytz.utc)
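# Illustrative sketch (not part of the original module): for a hypothetical
# schedule entry (only the keys used above are shown), the hour '1' maps to
# 13, so a 1:00 PM Eastern kickoff on 2013-09-08 becomes 17:00 UTC (EDT):
#
#   sched = {'eid': '2013090800', 'year': 2013, 'month': 9, 'day': 8,
#            'time': '1:00'}
#   _nflgame_start_time(sched)  # -> 2013-09-08 17:00:00+00:00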
def _nflgame_clock(clock):
"""
Given a `nflgame.game.GameClock` object, convert and return it as
a `nfldb.Clock` object.
"""
phase = Enums._nflgame_game_phase[clock.quarter]
elapsed = Clock._phase_max - ((clock._minutes * 60) + clock._seconds)
return Clock(phase, elapsed)
def _play_time(drive, play, next_play):
"""
Given a `nfldb.Play` object without time information and a
`nfldb.Drive` object, returns a `nfldb.Clock` object representing
the play's game clock. `next_play` must be a `nfldb.Play` object
corresponding to the next play in `drive` with valid time data, or
it can be `None` if one isn't available.
This is used for special non-plays like "Two-Minute Warning" or
timeouts. The source JSON data leaves the clock field NULL, but we
want to do better than that.
The drive is used to guess the quarter of a timeout and two-minute
warning.
"""
assert not play.time # Never do this when the play has time data!
desc = play.description.lower()
if next_play is not None and ('timeout' in desc or 'warning' in desc):
return next_play.time
elif 'end game' in desc or 'end of game' in desc:
return Clock(Enums.game_phase.Final, 0)
elif 'end quarter' in desc:
qtr = int(desc.strip()[12])
if qtr == 2:
return Clock(Enums.game_phase.Half, 0)
elif qtr == 5:
return Clock(Enums.game_phase.OT, Clock._phase_max)
elif qtr == 6:
return Clock(Enums.game_phase.OT2, Clock._phase_max)
else:
return Clock(Enums.game_phase['Q%d' % qtr], Clock._phase_max)
elif 'end of quarter' in desc:
if drive.start_time.phase is Enums.game_phase.Q2:
return Clock(Enums.game_phase.Half, 0)
else:
return Clock(drive.start_time.phase, Clock._phase_max)
elif 'end of half' in desc:
return Clock(Enums.game_phase.Half, 0)
return None
def _next_play_with(plays, play, pred):
"""
Returns the next `nfldb.Play` after `play` in `plays` where `pred`
returns True (given a `nfldb.Play` object). If such a play does
not exist, then `None` is returned.
"""
get_next = False
for p in plays:
if get_next:
            # Don't take a play that doesn't satisfy the predicate,
            # e.g., two timeouts in a row, or a two-minute warning
            # right next to a timeout.
if not pred(p):
continue
return p
if p.play_id == play.play_id:
get_next = True
return None
def _fill(db, fill_with, to_fill, attr):
"""
Fills a list of entities `to_fill` with the entity `fill_with`.
An instance of the appropriate `fill_with` entity is assigned
to the `attr` of `to_fill`.
"""
pk = fill_with._sql_tables['primary']
def pkval(entobj):
return tuple(getattr(entobj, k) for k in pk)
import nfldb.query
ids = list(set(pkval(obj) for obj in to_fill))
if len(ids) == 0:
return
objs = nfldb.query._entities_by_ids(db, fill_with, *ids)
byid = dict([(pkval(obj), obj) for obj in objs])
for obj in to_fill:
setattr(obj, attr, byid[pkval(obj)])
def _total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
# Taken from Python 2.7 stdlib to support 2.6.
convert = {
'__lt__': [('__gt__',
lambda self, other: not (self < other or self == other)),
('__le__',
lambda self, other: self < other or self == other),
('__ge__',
lambda self, other: not self < other)],
'__le__': [('__ge__',
lambda self, other: not self <= other or self == other),
('__lt__',
lambda self, other: self <= other and not self == other),
('__gt__',
lambda self, other: not self <= other)],
'__gt__': [('__lt__',
lambda self, other: not (self > other or self == other)),
('__ge__',
lambda self, other: self > other or self == other),
('__le__',
lambda self, other: not self > other)],
'__ge__': [('__le__',
lambda self, other: (not self >= other) or self == other),
('__gt__',
lambda self, other: self >= other and not self == other),
('__lt__',
lambda self, other: not self >= other)]
}
roots = set(dir(cls)) & set(convert)
if not roots:
raise ValueError('must define at least one ordering operation: '
'< > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
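# Illustrative sketch (not part of nfldb): under Python 2, defining __lt__
# (plus __eq__) is enough; the decorator derives __le__, __gt__ and __ge__:
#
#   @_total_ordering
#   class _Scored(object):
#       def __init__(self, n): self.n = n
#       def __lt__(self, other): return self.n < other.n
#       def __eq__(self, other): return self.n == other.n
#
#   _Scored(1) <= _Scored(2)  # -> True, via the derived __le__
#
# On Python 3, object itself defines all four comparison slots, so the
# dir()-based root detection sees nothing missing and adds nothing;
# functools.total_ordering is the modern replacement.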
class _Enum (enum.Enum):
"""
Conforms to the `getquoted` interface in psycopg2. This maps enum
types to SQL and back.
"""
@staticmethod
def _pg_cast(enum):
"""
Returns a function to cast a SQL enum to the enumeration type
corresponding to `enum`. Namely, `enum` should be a member of
`nfldb.Enums`.
"""
return lambda sqlv, _: None if not sqlv else enum[sqlv]
def __conform__(self, proto):
if proto is ISQLQuote:
return AsIs("'%s'" % self.name)
return None
def __str__(self):
return self.name
# Why can't I use the `_total_ordering` decorator on this class?
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._value_ < other._value_
def __le__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._value_ <= other._value_
def __gt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._value_ > other._value_
def __ge__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._value_ >= other._value_
class Enums (object):
"""
Enums groups all enum types used in the database schema.
All possible values for each enum type are represented as lists.
The ordering of each list is the same as the ordering in the
database. In particular, this ordering specifies a total ordering
that can be used in Python code to compare values in the same
enumeration.
"""
game_phase = _Enum('game_phase',
['Pregame', 'Q1', 'Q2', 'Half',
'Q3', 'Q4', 'OT', 'OT2', 'Final'])
"""
Represents the phase of the game. e.g., `Q1` or `Half`.
"""
season_phase = _Enum('season_phase',
['Preseason', 'Regular', 'Postseason'])
"""
Represents one of the three phases of an NFL season: `Preseason`,
`Regular` or `Postseason`.
"""
game_day = _Enum('game_day',
['Sunday', 'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday'])
"""
The day of the week on which a game was played. The week starts
on `Sunday`.
"""
player_pos = _Enum('player_pos',
['C', 'CB', 'DB', 'DE', 'DL', 'DT', 'FB', 'FS', 'G',
'ILB', 'K', 'LB', 'LS', 'MLB', 'NT', 'OG', 'OL', 'OLB',
'OT', 'P', 'QB', 'RB', 'SAF', 'SS', 'T', 'TE', 'WR',
'UNK'])
"""
The set of all possible player positions in abbreviated form.
"""
player_status = _Enum('player_status',
['Active', 'InjuredReserve', 'NonFootballInjury',
'Suspended', 'PUP', 'UnsignedDraftPick',
'Exempt', 'Unknown'])
"""
The current status of a player that is actively on a
roster. The statuses are taken from the key at the bottom of
http://goo.gl/HHsnjD
"""
category_scope = _Enum('category_scope', ['play', 'player'])
"""
The scope of a particular statistic. Typically, statistics refer
to a specific `player`, but sometimes a statistic refers to the
totality of a play. For example, `third_down_att` is a `play`
statistic that records third down attempts.
Currently, `play` and `player` are the only possible values.
Note that this type is not represented directly in the database
schema. Values of this type are constructed from data in
`category.py`.
"""
_nflgame_season_phase = {
'PRE': season_phase.Preseason,
'REG': season_phase.Regular,
'POST': season_phase.Postseason,
}
"""
Maps a season type in `nflgame` to a `nfldb.Enums.season_phase`.
"""
_nflgame_game_phase = {
'Pregame': game_phase.Pregame,
'Halftime': game_phase.Half,
'Final': game_phase.Final,
'final': game_phase.Final,
1: game_phase.Q1,
2: game_phase.Q2,
3: game_phase.Half,
4: game_phase.Q3,
5: game_phase.Q4,
6: game_phase.OT,
7: game_phase.OT2,
}
"""
Maps a game phase in `nflgame` to a `nfldb.Enums.game_phase`.
"""
_nflgame_game_day = {
'Sun': game_day.Sunday,
'Mon': game_day.Monday,
'Tue': game_day.Tuesday,
'Wed': game_day.Wednesday,
'Thu': game_day.Thursday,
'Fri': game_day.Friday,
'Sat': game_day.Saturday,
}
"""
Maps a game day of the week in `nflgame` to a
`nfldb.Enums.game_day`.
"""
_nflgame_player_status = {
'ACT': player_status.Active,
'RES': player_status.InjuredReserve,
'NON': player_status.NonFootballInjury,
'Suspended': player_status.Suspended,
'PUP': player_status.PUP,
'UDF': player_status.UnsignedDraftPick,
'EXE': player_status.Exempt,
# Everything else is `player_status.Unknown`
}
class Category (object):
"""
Represents meta data about a statistical category. This includes
the category's scope, GSIS identifier, name and short description.
"""
__slots__ = ['category_id', 'gsis_number', 'category_type',
'is_real', 'description']
def __init__(self, category_id, gsis_number, category_type,
is_real, description):
self.category_id = category_id
"""
A unique name for this category.
"""
self.gsis_number = gsis_number
"""
A unique numeric identifier for this category.
"""
self.category_type = category_type
"""
The scope of this category represented with
`nfldb.Enums.category_scope`.
"""
self.is_real = is_real
"""
Whether this statistic is a real number or not. Currently,
only the `defense_sk` statistic has `Category.is_real` set to
`True`.
"""
self.description = description
"""
A free-form text description of this category.
"""
@property
def _sql_field(self):
"""
The SQL definition of this column. Statistics are always
NOT NULL and have a default value of `0`.
When `Category.is_real` is `True`, then the SQL type is `real`.
Otherwise, it's `smallint`.
"""
typ = 'real' if self.is_real else 'smallint'
default = '0.0' if self.is_real else '0'
return '%s %s NOT NULL DEFAULT %s' % (self.category_id, typ, default)
def __str__(self):
return self.category_id
def __eq__(self, other):
return self.category_id == other.category_id
# We've got to put the stat category stuff here because we need the
# Enums class defined. But `Play` and `PlayPlayer` need these
# categories to fill in __slots__ in their definition too. Ugly.
stat_categories = _stat_categories()
__pdoc__['stat_categories'] = """
An ordered dictionary of every statistical category available for
play-by-play data. The keys are the category identifier (e.g.,
`passing_yds`) and the values are `nfldb.Category` objects.
"""
_play_categories = OrderedDict(
[(n, c) for n, c in stat_categories.items()
if c.category_type is Enums.category_scope.play])
_player_categories = OrderedDict(
[(n, c) for n, c in stat_categories.items()
if c.category_type is Enums.category_scope.player])
# Don't document these fields because there are too many.
# Instead, the API docs will include a link to a Wiki page with a table
# of stat categories.
for cat in _play_categories.values():
__pdoc__['Play.%s' % cat.category_id] = None
for cat in _player_categories.values():
__pdoc__['Play.%s' % cat.category_id] = None
__pdoc__['PlayPlayer.%s' % cat.category_id] = None
class Team (object):
"""
Represents information about an NFL team. This includes its
standard three letter abbreviation, city and mascot name.
"""
# BUG: If multiple databases are used with different team information,
# this class won't behave correctly since it's using a global cache.
__slots__ = ['team_id', 'city', 'name']
__cache = defaultdict(dict)
def __new__(cls, db, abbr):
abbr = nfldb.team.standard_team(abbr)
if abbr in Team.__cache:
return Team.__cache[abbr]
return object.__new__(cls)
def __init__(self, db, abbr):
"""
Introduces a new team given an abbreviation and a database
connection. The database connection is used to retrieve other
team information if it isn't cached already. The abbreviation
given is passed to `nfldb.standard_team` for you.
"""
if hasattr(self, 'team_id'):
# Loaded from cache.
return
self.team_id = nfldb.team.standard_team(abbr)
"""
The unique team identifier represented as its standard
2 or 3 letter abbreviation.
"""
self.city = None
"""
The city where this team resides.
"""
self.name = None
"""
The full "mascot" name of this team.
"""
if self.team_id not in Team.__cache:
with Tx(db) as cur:
cur.execute('SELECT * FROM team WHERE team_id = %s',
(self.team_id,))
row = cur.fetchone()
self.city = row['city']
self.name = row['name']
Team.__cache[self.team_id] = self
def __str__(self):
return '%s %s' % (self.city, self.name)
def __conform__(self, proto):
if proto is ISQLQuote:
return AsIs("'%s'" % self.team_id)
return None
@_total_ordering
class FieldPosition (object):
"""
Represents field position.
The representation is an integer offset where the 50 yard line
corresponds to '0'. Being in one's own territory corresponds to a
negative offset while being in the opponent's territory corresponds
to a positive offset.
e.g., NE has the ball on the NE 45, the offset is -5.
e.g., NE has the ball on the NYG 2, the offset is 48.
This class also defines a total ordering on field
positions. Namely, given f1 and f2, f1 < f2 if and only if f2
is closer to the goal line for the team with possession of the
football.
"""
__slots__ = ['_offset']
@staticmethod
def _pg_cast(sqlv, cursor):
if not sqlv:
return FieldPosition(None)
return FieldPosition(int(sqlv[1:-1]))
@staticmethod
def from_str(pos):
"""
Given a string `pos` in the format `FIELD YARDLINE`, this
returns a new `FieldPosition` object representing the yardline
given. `FIELD` must be the string `OWN` or `OPP` and `YARDLINE`
must be an integer in the range `[0, 50]`.
For example, `OPP 19` corresponds to an offset of `31`
and `OWN 5` corresponds to an offset of `-45`. Midfield can be
expressed as either `MIDFIELD`, `OWN 50` or `OPP 50`.
"""
if pos.upper() == 'MIDFIELD':
return FieldPosition(0)
field, yrdline = pos.split(' ')
field, yrdline = field.upper(), int(yrdline)
assert field in ('OWN', 'OPP')
assert 0 <= yrdline <= 50
if field == 'OWN':
return FieldPosition(yrdline - 50)
else:
return FieldPosition(50 - yrdline)
def __init__(self, offset):
"""
Makes a new `nfldb.FieldPosition` given a field `offset`.
`offset` must be in the integer range [-50, 50].
"""
if offset is None:
self._offset = None
return
assert -50 <= offset <= 50
self._offset = offset
def _add_yards(self, yards):
"""
Returns a new `nfldb.FieldPosition` with `yards` added to this
field position. The value of `yards` may be negative.
"""
assert self.valid
newoffset = max(-50, min(50, self._offset + yards))
return FieldPosition(newoffset)
@property
def valid(self):
"""
Returns `True` if and only if this field position is known and
valid.
Invalid field positions cannot be compared with other field
positions.
"""
return self._offset is not None
def __add__(self, other):
if isinstance(other, FieldPosition):
toadd = other._offset
else:
toadd = other
newoffset = max(-50, min(50, self._offset + toadd))
return FieldPosition(newoffset)
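    # Illustrative note (not in the original source): like _add_yards,
    # __add__ clamps the result to the field, e.g. FieldPosition(45) + 10
    # yields an offset of 50 (the goal line), not 55.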
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
if not self.valid:
return True
if not other.valid:
return False
return self._offset < other._offset
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._offset == other._offset
def __str__(self):
if not self.valid:
return 'N/A'
elif self._offset > 0:
return 'OPP %d' % (50 - self._offset)
elif self._offset < 0:
return 'OWN %d' % (50 + self._offset)
else:
return 'MIDFIELD'
def __conform__(self, proto):
if proto is ISQLQuote:
if not self.valid:
return AsIs("NULL")
else:
return AsIs("ROW(%d)::field_pos" % self._offset)
return None
@_total_ordering
class PossessionTime (object):
"""
Represents the possession time of a drive in seconds.
This class defines a total ordering on possession times. Namely, p1
< p2 if and only if p2 corresponds to a longer time of possession
than p1.
"""
__slots__ = ['_seconds']
@staticmethod
def from_str(clock_str):
"""
Introduces a `nfldb.PossessionTime` object from a string
formatted as clock time. For example, `2:00` corresponds to
`120` seconds and `14:39` corresponds to `879` seconds.
"""
minutes, seconds = map(int, clock_str.split(':', 1))
return PossessionTime((minutes * 60) + seconds)
@staticmethod
def _pg_cast(sqlv, cursor):
return PossessionTime(int(sqlv[1:-1]))
def __init__(self, seconds):
"""
Returns a `nfldb.PossessionTime` object given the number of
seconds of the possession.
"""
assert isinstance(seconds, int)
self._seconds = seconds
@property
def valid(self):
"""
Returns `True` if and only if this possession time has a valid
representation.
Invalid possession times cannot be compared with other
possession times.
"""
return self._seconds is not None
@property
def total_seconds(self):
"""
The total seconds elapsed for this possession.
`0` is returned if this is not a valid possession time.
"""
return self._seconds if self.valid else 0
@property
def minutes(self):
"""
The number of whole minutes for a possession.
e.g., `0:59` would be `0` minutes and `4:01` would be `4`
minutes.
`0` is returned if this is not a valid possession time.
"""
return (self._seconds // 60) if self.valid else 0
@property
def seconds(self):
"""
The seconds portion of the possession time.
e.g., `0:59` would be `59` seconds and `4:01` would be `1`
second.
`0` is returned if this is not a valid possession time.
"""
return (self._seconds % 60) if self.valid else 0
def __str__(self):
if not self.valid:
return 'N/A'
else:
return '%02d:%02d' % (self.minutes, self.seconds)
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
assert self.valid and other.valid
return self._seconds < other._seconds
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self._seconds == other._seconds
def __conform__(self, proto):
if proto is ISQLQuote:
if not self.valid:
return AsIs("NULL")
else:
return AsIs("ROW(%d)::pos_period" % self._seconds)
return None
@_total_ordering
class Clock (object):
"""
Represents a single point in time during a game. This includes the
quarter and the game clock time in addition to other phases of the
game such as before the game starts, half time, overtime and when
the game ends.
Note that the clock time does not uniquely identify a play, since
not all plays consume time on the clock. (e.g., A two point
conversion.)
This class defines a total ordering on clock times. Namely, c1 < c2
if and only if c2 is closer to the end of the game than c1.
"""
_nonqs = (Enums.game_phase.Pregame, Enums.game_phase.Half,
Enums.game_phase.Final)
"""
The phases of the game that do not have a time component.
"""
_phase_max = 900
"""
The maximum number of seconds in a game phase.
"""
@staticmethod
def from_str(phase, clock):
"""
Introduces a new `nfldb.Clock` object given strings of the game
phase and the clock. `phase` may be one of the values in the
`nfldb.Enums.game_phase` enumeration. `clock` must be a clock
string in the format `MM:SS`, e.g., `4:01` corresponds to a
game phase with 4 minutes and 1 second remaining.
"""
assert getattr(Enums.game_phase, phase, None) is not None, \
'"%s" is not a valid game phase. choose one of %s' \
% (phase, map(str, Enums.game_phase))
minutes, seconds = map(int, clock.split(':', 1))
elapsed = Clock._phase_max - ((minutes * 60) + seconds)
return Clock(Enums.game_phase[phase], int(elapsed))
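    # Illustrative sketch (not part of nfldb): '2:00' left in Q4 means 780
    # of the phase's 900 seconds have elapsed:
    #
    #   c = Clock.from_str('Q4', '2:00')
    #   (c.phase, c.elapsed, str(c))  # -> (game_phase.Q4, 780, 'Q4 02:00')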
@staticmethod
def _pg_cast(sqlv, cursor):
"""
Casts a SQL string of the form `(game_phase, elapsed)` to a
`nfldb.Clock` object.
"""
phase, elapsed = map(str.strip, sqlv[1:-1].split(','))
return Clock(Enums.game_phase[phase], int(elapsed))
def __init__(self, phase, elapsed):
"""
Introduces a new `nfldb.Clock` object. `phase` should
be a value from the `nfldb.Enums.game_phase` enumeration
while `elapsed` should be the number of seconds elapsed in
the `phase`. Note that `elapsed` is only applicable when
`phase` is a quarter (including overtime). In all other
cases, it will be set to `0`.
`elapsed` should be in the range `[0, 900]` where `900`
corresponds to the clock time `0:00` and `0` corresponds
to the clock time `15:00`.
"""
assert isinstance(phase, Enums.game_phase)
assert 0 <= elapsed <= Clock._phase_max
if phase in Clock._nonqs:
elapsed = 0
self.phase = phase
"""
The phase represented by this clock object. It is guaranteed
to have type `nfldb.Enums.game_phase`.
"""
self.elapsed = elapsed
"""
The number of seconds remaining in this clock's phase of the
game. It is always set to `0` whenever the phase is not a
quarter in the game.
"""
def add_seconds(self, seconds):
"""
Adds the number of seconds given to the current clock time
and returns a new clock time. `seconds` may be positive
or negative. If a boundary is reached (e.g., `Pregame` or
`Final`), then subtracting or adding more seconds has no
effect.
"""
elapsed = self.elapsed + seconds
phase_jump = 0
if elapsed < 0 or elapsed > Clock._phase_max:
phase_jump = elapsed // Clock._phase_max
# Always skip over halftime.
phase_val = self.phase.value + phase_jump
if self.phase.value <= Enums.game_phase.Half.value <= phase_val:
phase_val += 1
elif phase_val <= Enums.game_phase.Half.value <= self.phase.value:
phase_val -= 1
try:
phase = Enums.game_phase(phase_val)
return Clock(phase, elapsed % (1 + Clock._phase_max))
except ValueError:
if phase_val < 0:
return Clock(Enums.game_phase.Pregame, 0)
return Clock(Enums.game_phase.Final, 0)
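    # Illustrative sketch (not part of nfldb): adding seconds across a
    # quarter boundary skips halftime. With 0:10 left in Q2 (elapsed=890),
    # adding 20 seconds lands in Q3, not Half:
    #
    #   Clock(Enums.game_phase.Q2, 890).add_seconds(20)
    #   # -> phase Q3, elapsed 9, i.e. str() == 'Q3 14:51'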
@property
def minutes(self):
"""
If the clock has a time component, then the number of whole
minutes **left in this phase** is returned. Otherwise, `0` is
returned.
"""
if self.elapsed == 0:
return 0
return (Clock._phase_max - self.elapsed) // 60
@property
def seconds(self):
"""
If the clock has a time component, then the number of seconds
**left in this phase** is returned. Otherwise, `0` is returned.
"""
if self.elapsed == 0:
return 0
return (Clock._phase_max - self.elapsed) % 60
def __str__(self):
phase = self.phase
if phase in Clock._nonqs:
return phase.name
else:
return '%s %02d:%02d' % (phase.name, self.minutes, self.seconds)
def __lt__(self, o):
if self.__class__ is not o.__class__:
return NotImplemented
return (self.phase, self.elapsed) < (o.phase, o.elapsed)
def __eq__(self, o):
if self.__class__ is not o.__class__:
return NotImplemented
return self.phase == o.phase and self.elapsed == o.elapsed
def __conform__(self, proto):
if proto is ISQLQuote:
return AsIs("ROW('%s', %d)::game_time"
% (self.phase.name, self.elapsed))
return None
class SQLPlayer (sql.Entity):
__slots__ = []
_sql_tables = {
'primary': ['player_id'],
'managed': ['player'],
'tables': [
('player', ['gsis_name', 'full_name', 'first_name',
'last_name', 'team', 'position', 'profile_id',
'profile_url', 'uniform_number', 'birthdate',
'college', 'height', 'weight', 'years_pro', 'status',
]),
],
'derived': [],
}
class Player (SQLPlayer):
"""
A representation of an NFL player. Note that the representation
is inherently ephemeral; it always corresponds to the most recent
knowledge about a player.
Most of the fields in this object can have a `None` value. This is
because the source JSON data only guarantees that a GSIS identifier
and abbreviated name will be available. The rest of the player meta
data is scraped from NFL.com's team roster pages (which invites
infrequent uncertainty).
"""
__slots__ = SQLPlayer.sql_fields() + ['_db']
_existing = None
"""
A cache of existing player ids in the database.
This is only used when saving data to detect if a player
needs to be added.
"""
@staticmethod
def _from_nflgame(db, p):
"""
Given `p` as a `nflgame.player.PlayPlayerStats` object,
`_from_nflgame` converts `p` to a `nfldb.Player` object.
"""
dbp = Player(db)
dbp.player_id = p.playerid
dbp.gsis_name = p.name
if p.player is not None:
meta = ['full_name', 'first_name', 'last_name', 'team', 'position',
'profile_id', 'profile_url', 'uniform_number', 'birthdate',
'college', 'height', 'weight', 'years_pro', 'status']
for k in meta:
v = getattr(p.player, k, '')
if not v:
# Normalize all empty values to `None`
v = None
setattr(dbp, k, v)
# Convert position and status values to an enumeration.
dbp.position = getattr(Enums.player_pos,
dbp.position or '',
Enums.player_pos.UNK)
trans = Enums._nflgame_player_status
dbp.status = trans.get(dbp.status or '',
Enums.player_status.Unknown)
if getattr(dbp, 'position', None) is None:
dbp.position = Enums.player_pos.UNK
if getattr(dbp, 'status', None) is None:
dbp.status = Enums.player_status.Unknown
dbp.team = nfldb.team.standard_team(getattr(dbp, 'team', ''))
return dbp
@staticmethod
def _from_nflgame_player(db, p):
"""
Given `p` as a `nflgame.player.Player` object,
`_from_nflgame_player` converts `p` to a `nfldb.Player` object.
"""
# This hack translates `nflgame.player.Player` to something like
# a `nflgame.player.PlayPlayerStats` object that can be converted
# with `nfldb.Player._from_nflgame`.
class _Player (object):
def __init__(self):
self.playerid = p.player_id
self.name = p.gsis_name
self.player = p
return Player._from_nflgame(db, _Player())
@staticmethod
def from_id(db, player_id):
"""
Given a player GSIS identifier (e.g., `00-0019596`) as a string,
returns a `nfldb.Player` object corresponding to `player_id`.
This function will always execute a single SQL query.
If no corresponding player is found, `None` is returned.
"""
import nfldb.query
q = nfldb.query.Query(db)
players = q.player(player_id=player_id).limit(1).as_players()
if len(players) == 0:
return None
return players[0]
def __init__(self, db):
"""
Creates a new and empty `nfldb.Player` object with the given
database connection.
This constructor should not be used by clients. Instead, you
should get `nfldb.Player` objects from `nfldb.Query` or from
one of the other constructors, like `nfldb.Player.from_id` or
`nfldb.Player.from_row_dict`. (The latter is useful only if
you're writing your own SQL queries.)
"""
self._db = db
self.player_id = None
"""
The player_id linking this object `nfldb.PlayPlayer` object.
N.B. This is the GSIS identifier string. It always has length
10.
"""
self.gsis_name = None
"""
The name of a player from the source GameCenter data. This
field is guaranteed to contain a name.
"""
self.full_name = None
"""The full name of a player."""
self.first_name = None
"""The first name of a player."""
self.last_name = None
"""The last name of a player."""
self.team = None
"""
The team that the player is currently active on. If the player
is no longer playing or is a free agent, this value may
correspond to the `UNK` (unknown) team.
"""
self.position = None
"""
The current position of a player if it's available. This may
be **not** be `None`. If the position is not known, then the
`UNK` enum is used from `nfldb.Enums.player_pos`.
"""
self.profile_id = None
"""
The profile identifier used on a player's canonical NFL.com
profile page. This is used as a foreign key to connect varying
sources of information.
"""
self.profile_url = None
"""The NFL.com profile URL for this player."""
self.uniform_number = None
"""A player's uniform number as an integer."""
self.birthdate = None
"""A player's birth date as a free-form string."""
self.college = None
"""A player's college as a free-form string."""
self.height = None
"""A player's height as a free-form string."""
self.weight = None
"""A player's weight as a free-form string."""
self.years_pro = None
"""The number of years a player has played as an integer."""
self.status = None
"""The current status of this player as a free-form string."""
def _save(self, cursor):
if Player._existing is None:
Player._existing = set()
cursor.execute('SELECT player_id FROM player')
for row in cursor.fetchall():
Player._existing.add(row['player_id'])
if self.player_id not in Player._existing:
super(Player, self)._save(cursor)
Player._existing.add(self.player_id)
def __str__(self):
name = self.full_name if self.full_name else self.gsis_name
if not name:
name = self.player_id # Yikes.
return '%s (%s, %s)' % (name, self.team, self.position)
def __lt__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
if self.full_name and other.full_name:
return self.full_name < other.full_name
return self.gsis_name < other.gsis_name
def __eq__(self, other):
if self.__class__ is not other.__class__:
return NotImplemented
return self.player_id == other.player_id
class SQLPlayPlayer (sql.Entity):
__slots__ = []
_sql_tables = {
'primary': ['gsis_id', 'drive_id', 'play_id', 'player_id'],
'managed': ['play_player'],
        'tables': [('play_player', ['team'] + list(_player_categories.keys()))],
'derived': ['offense_yds', 'offense_tds', 'defense_tds', 'points'],
}
# These fields are combined using `GREATEST`.
_derived_combined = {
'offense_yds': ['passing_yds', 'rushing_yds', 'receiving_yds',
'fumbles_rec_yds'],
'offense_tds': ['passing_tds', 'receiving_tds', 'rushing_tds',
'fumbles_rec_tds'],
'defense_tds': ['defense_frec_tds', 'defense_int_tds',
'defense_misc_tds'],
}
_point_values = [
('defense_frec_tds', 6),
('defense_int_tds', 6),
('defense_misc_tds', 6),
('fumbles_rec_tds', 6),
('kicking_rec_tds', 6),
('kickret_tds', 6),
('passing_tds', 6),
('puntret_tds', 6),
('receiving_tds', 6),
('rushing_tds', 6),
('kicking_xpmade', 1),
('passing_twoptm', 2),
('receiving_twoptm', 2),
('rushing_twoptm', 2),
('kicking_fgm', 3),
('defense_safe', 2),
]
@classmethod
def _sql_field(cls, name, aliases=None):
if name in cls._derived_combined:
fields = cls._derived_combined[name]
fields = [cls._sql_field(f, aliases=aliases) for f in fields]
return 'GREATEST(%s)' % ', '.join(fields)
elif name == 'points':
fields = ['(%s * %d)' % (cls._sql_field(f, aliases=aliases), pval)
for f, pval in cls._point_values]
return 'GREATEST(%s)' % ', '.join(fields)
else:
return super(SQLPlayPlayer, cls)._sql_field(name, aliases=aliases)
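# Illustrative note (not in the original source): combining fields with
# GREATEST rather than a sum appears to rely on the invariant that at most
# one of the combined categories is nonzero in a single play_player row
# (e.g., a row records passing_yds or rushing_yds, not both), so GREATEST
# simply selects the populated value.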
class PlayPlayer (SQLPlayPlayer):
"""
A "play player" is a statistical grouping of categories for a
single player inside a play. For example, passing the ball to
a receiver necessarily requires two "play players": the pass
(by player X) and the reception (by player Y). Statistics that
aren't included, for example, are blocks and penalties. (Although
penalty information can be gleaned from a play's free-form
`nfldb.Play.description` attribute.)
Each `nfldb.PlayPlayer` object belongs to exactly one
`nfldb.Play` and exactly one `nfldb.Player`.
Any statistical categories not relevant to this particular play
and player default to `0`.
Most of the statistical fields are documented on the
[statistical categories](http://goo.gl/wZstcY)
wiki page. Each statistical field is an instance attribute in
this class.
"""
__slots__ = SQLPlayPlayer.sql_fields() \
+ ['_db', '_play', '_player', '_fields']
# Document instance variables for derived SQL fields.
# We hide them from the public interface, but make the doco
# available to nfldb-mk-stat-table. Evil!
__pdoc__['PlayPlayer.offense_yds'] = None
__pdoc__['_PlayPlayer.offense_yds'] = \
'''
Corresponds to any yardage that is manufactured by the offense.
Namely, the following fields:
`nfldb.PlayPlayer.passing_yds`,
`nfldb.PlayPlayer.rushing_yds`,
`nfldb.PlayPlayer.receiving_yds` and
`nfldb.PlayPlayer.fumbles_rec_yds`.
This field is useful when searching for plays by net yardage
regardless of how the yards were obtained.
'''
__pdoc__['PlayPlayer.offense_tds'] = None
__pdoc__['_PlayPlayer.offense_tds'] = \
'''
Corresponds to any touchdown manufactured by the offense via
a passing, reception, rush or fumble recovery.
'''
__pdoc__['PlayPlayer.defense_tds'] = None
__pdoc__['_PlayPlayer.defense_tds'] = \
'''
Corresponds to any touchdown manufactured by the defense.
e.g., a pick-6, fumble recovery TD, punt/FG block TD, etc.
'''
__pdoc__['PlayPlayer.points'] = \
"""
The number of points scored in this player statistic. This
accounts for touchdowns, extra points, two point conversions,
field goals and safeties.
"""
@staticmethod
def _from_nflgame(db, p, pp):
"""
Given `p` as a `nfldb.Play` object and `pp` as a
`nflgame.player.PlayPlayerStats` object, `_from_nflgame`
converts `pp` to a `nfldb.PlayPlayer` object.
"""
team = nfldb.team.standard_team(pp.team)
dbpp = PlayPlayer(db)
dbpp.gsis_id = p.gsis_id
dbpp.drive_id = p.drive_id
dbpp.play_id = p.play_id
dbpp.player_id = pp.playerid
dbpp.team = team
for k in _player_categories.keys():
if pp._stats.get(k, 0) != 0:
setattr(dbpp, k, pp._stats[k])
dbpp._play = p
dbpp._player = Player._from_nflgame(db, pp)
return dbpp
@staticmethod
def fill_plays(db, play_players):
"""
Given a list of `play_players`, fill all of their `play` attributes
using as few queries as possible. This will also fill the
plays with drive data and each drive with game data.
"""
_fill(db, Play, play_players, '_play')
Play.fill_drives(db, [pp._play for pp in play_players])
Drive.fill_games(db, [pp._play._drive for pp in play_players])
@staticmethod
def fill_players(db, play_players):
"""
Given a list of `play_players`, fill all of their `player`
attributes using as few queries as possible.
"""
_fill(db, Player, play_players, '_player')
def __init__(self, db):
"""
Creates a new and empty `nfldb.PlayPlayer` object with the
given database connection.
This constructor should not be used by clients. Instead,
you should get `nfldb.PlayPlayer` objects
from `nfldb.Query` or from one of the other
constructors, like `nfldb.PlayPlayer.from_id` or
`nfldb.PlayPlayer.from_row_dict`. (The latter is useful only if
you're writing your own SQL queries.)
"""
self._db = db
self._play = None
self._player = None
self._fields = None
self.gsis_id = None
"""
The GSIS identifier for the game that this "play player"
belongs to.
"""
self.drive_id = None
"""
The numeric drive identifier for this "play player". It may be
interpreted as a sequence number.
"""
self.play_id = None
"""
The numeric play identifier for this "play player". It can
typically be interpreted as a sequence number scoped to its
corresponding game.
"""
self.player_id = None
"""
The player_id linking these stats to a `nfldb.Player` object.
Use `nfldb.PlayPlayer.player` to access player meta data.
N.B. This is the GSIS identifier string. It always has length
10.
"""
self.team = None
"""
The team that this player belonged to when he recorded the
statistics in this play.
"""
@property
def fields(self):
"""The set of non-zero statistical fields set."""
if self._fields is None:
self._fields = set()
for k in _player_categories.keys():
if getattr(self, k, 0) != 0:
self._fields.add(k)
return self._fields
@property
def play(self):
"""
The `nfldb.Play` object that this "play player" belongs
to. The play is retrieved from the database if necessary.
"""
if self._play is None:
self._play = Play.from_id(self._db, self.gsis_id, self.drive_id,
self.play_id)
return self._play
@property
def player(self):
"""
The `nfldb.Player` object that this "play player"
corresponds to. The player is retrieved from the database if
necessary.
"""
if self._player is None:
self._player = Player.from_id(self._db, self.player_id)
return self._player
@property
def scoring_team(self):
"""
If this is a scoring statistic, returns the team that scored.
Otherwise, returns None.
N.B. `nfldb.PlayPlayer.scoring_team` returns a valid team if
and only if `nfldb.PlayPlayer.points` is greater than 0.
"""
if self.points > 0:
return self.team
return None
@property
def guess_position(self):
"""
Guesses the position of this player based on the statistical
categories present.
Note that this only distinguishes the offensive positions of
QB, RB, WR, P and K. If defensive stats are detected, then
the position returned defaults to LB.
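For example, a stat line whose only non-zero category is
`passing_att` yields `Enums.player_pos.QB`, while a line with any
`defense_` stat yields `Enums.player_pos.LB`.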
"""
stat_to_pos = [
('passing_att', 'QB'), ('rushing_att', 'RB'),
('receiving_tar', 'WR'), ('punting_tot', 'P'),
('kicking_tot', 'K'), ('kicking_fga', 'K'), ('kicking_xpa', 'K'),
]
for c in stat_categories:
if c.startswith('defense_'):
stat_to_pos.append((c, 'LB'))
for stat, pos in stat_to_pos:
if getattr(self, stat) != 0:
return Enums.player_pos[pos]
return Enums.player_pos.UNK
def _save(self, cursor):
if self._player is not None:
self._player._save(cursor)
super(PlayPlayer, self)._save(cursor)
def _add(self, b):
"""
Given two `nfldb.PlayPlayer` objects, `_add` accumulates `b`
into `self`. Namely, no new `nfldb.PlayPlayer` objects are
created.
Both `self` and `b` must refer to the same player, or else an
assertion error is raised.
The `nfldb.aggregate` function should be used to sum collections
of `nfldb.PlayPlayer` objects (or objects that can provide
`nfldb.PlayPlayer` objects).
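A brief sketch (assuming `pp1` and `pp2` hold stats for the same
player):

    total = pp1 + pp2  # `__add__` copies `pp1`, then calls `_add`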
"""
a = self
assert a.player_id == b.player_id
a.gsis_id = a.gsis_id if a.gsis_id == b.gsis_id else None
a.drive_id = a.drive_id if a.drive_id == b.drive_id else None
a.play_id = a.play_id if a.play_id == b.play_id else None
a.team = a.team if a.team == b.team else None
for cat in _player_categories:
setattr(a, cat, getattr(a, cat) + getattr(b, cat))
# Try to copy player meta data too.
if a._player is None and b._player is not None:
a._player = b._player
# A play attached to aggregate statistics is always wrong.
a._play = None
def _copy(self):
"""Returns a copy of `self`."""
pp = PlayPlayer(self._db)
pp.gsis_id = self.gsis_id
pp.drive_id = self.drive_id
pp.play_id = self.play_id
pp.player_id = self.player_id
pp.team = self.team
ga, sa = getattr, setattr
for k in _player_categories:
v = getattr(self, k, 0)
if v != 0:
sa(pp, k, v)
pp._player = self._player
pp._play = self._play
return pp
def __add__(self, b):
pp = self._copy()
pp._add(b)
return pp
def __str__(self):
d = {}
for cat in _player_categories:
v = getattr(self, cat, 0)
if v != 0:
d[cat] = v
return repr(d)
def __getattr__(self, k):
if k in PlayPlayer.__slots__:
return 0
raise AttributeError(k)
class SQLPlay (sql.Entity):
__slots__ = []
_sql_tables = {
'primary': ['gsis_id', 'drive_id', 'play_id'],
'managed': ['play'],
'tables': [
('play', ['time', 'pos_team', 'yardline', 'down', 'yards_to_go',
'description', 'note', 'time_inserted', 'time_updated',
] + list(_play_categories.keys())),  # list() so this also works on Python 3
('agg_play', list(_player_categories.keys())),
],
'derived': ['offense_yds', 'offense_tds', 'defense_tds', 'points',
'game_date'],
}
@classmethod
def _sql_field(cls, name, aliases=None):
if name in PlayPlayer._derived_combined:
fields = [cls._sql_field(f, aliases=aliases)
for f in PlayPlayer._derived_combined[name]]
return 'GREATEST(%s)' % ', '.join(fields)
elif name == 'points':
fields = ['(%s * %d)' % (cls._sql_field(f, aliases=aliases), pval)
for f, pval in PlayPlayer._point_values]
return 'GREATEST(%s)' % ', '.join(fields)
elif name == 'game_date':
gsis_id = cls._sql_field('gsis_id', aliases=aliases)
return 'SUBSTRING(%s from 1 for 8)' % gsis_id
else:
return super(SQLPlay, cls)._sql_field(name, aliases=aliases)
class Play (SQLPlay):
"""
Represents a single play in an NFL game. Each play has an
assortment of meta data, possibly including the time on the clock
in which the ball was snapped, the starting field position, the
down, yards to go, etc. Not all plays have values for each field
(for example, a timeout is considered a play but has no data for
`nfldb.Play.down` or `nfldb.Play.yardline`).
In addition to meta data describing the context of the game at the time
the ball was snapped, plays also have statistics corresponding to the
fields in `nfldb.stat_categories` with a `nfldb.Category.category_type`
of `play`. For example, `third_down_att`, `fourth_down_failed` and
`fourth_down_conv`. While the binary nature of these fields suggests
a boolean value, they are actually integers. This makes them amenable
to aggregation.
Plays are also associated with player statistics or "events" that
occurred in a play. For example, in a single play one player could
pass the ball to another player. This is recorded as two different
player statistics: a pass and a reception. Each one is represented
as a `nfldb.PlayPlayer` object. Plays may have **zero or more** of
these player statistics.
Finally, it is important to note that there are (currently) some
useful statistics missing. For example, there is currently no
reliable means of determining the time on the clock when the play
finished. Also, there is no field describing the field position at
the end of the play, although this may be added in the future.
Most of the statistical fields are documented on the
[statistical categories](http://goo.gl/YY587P)
wiki page. Each statistical field is an instance attribute in
this class.
"""
__slots__ = SQLPlay.sql_fields() + ['_db', '_drive', '_play_players']
# Document instance variables for derived SQL fields.
# We hide them from the public interface, but make the doco
# available to nfldb-mk-stat-table. Evil!
__pdoc__['Play.offense_yds'] = None
__pdoc__['_Play.offense_yds'] = \
'''
Corresponds to any yardage that is manufactured by the offense.
Namely, the following fields:
`nfldb.Play.passing_yds`,
`nfldb.Play.rushing_yds`,
`nfldb.Play.receiving_yds` and
`nfldb.Play.fumbles_rec_yds`.
This field is useful when searching for plays by net yardage
regardless of how the yards were obtained.
'''
__pdoc__['Play.offense_tds'] = None
__pdoc__['_Play.offense_tds'] = \
'''
Corresponds to any touchdown manufactured by the offense via
a passing, reception, rush or fumble recovery.
'''
__pdoc__['Play.defense_tds'] = None
__pdoc__['_Play.defense_tds'] = \
'''
Corresponds to any touchdown manufactured by the defense.
e.g., a pick-6, fumble recovery TD, punt/FG block TD, etc.
'''
__pdoc__['Play.points'] = \
"""
The number of points scored in this player statistic. This
accounts for touchdowns, extra points, two point conversions,
field goals and safeties.
"""
@staticmethod
def _from_nflgame(db, d, p):
"""
Given `d` as a `nfldb.Drive` object and `p` as a
`nflgame.game.Play` object, `_from_nflgame` converts `p` to a
`nfldb.Play` object.
"""
# Fix up some fields so they meet the constraints of the schema.
# The `time` field is cleaned up afterwards in
# `nfldb.Drive._from_nflgame`, since it needs data about surrounding
# plays.
time = None if not p.time else _nflgame_clock(p.time)
yardline = FieldPosition(getattr(p.yardline, 'offset', None))
down = p.down if 1 <= p.down <= 4 else None
team = p.team if p.team is not None and len(p.team) > 0 else 'UNK'
dbplay = Play(db)
dbplay.gsis_id = d.gsis_id
dbplay.drive_id = d.drive_id
dbplay.play_id = int(p.playid)
dbplay.time = time
dbplay.pos_team = team
dbplay.yardline = yardline
dbplay.down = down
dbplay.yards_to_go = p.yards_togo
dbplay.description = p.desc
dbplay.note = p.note
for k in _play_categories.keys():
if p._stats.get(k, 0) != 0:
setattr(dbplay, k, p._stats[k])
# Note that `Play` objects also normally contain aggregated
# statistics, but we forgo that here because this constructor
# is only used to load plays into the database.
dbplay._drive = d
dbplay._play_players = []
for pp in p.players:
dbpp = PlayPlayer._from_nflgame(db, dbplay, pp)
dbplay._play_players.append(dbpp)
return dbplay
@staticmethod
def from_id(db, gsis_id, drive_id, play_id):
"""
Given a GSIS identifier (e.g., `2012090500`) as a string,
an integer drive id and an integer play id, this returns a
`nfldb.Play` object corresponding to the given identifiers.
If no corresponding play is found, then `None` is returned.
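A usage sketch, assuming `db` is a connection returned by
`nfldb.connect` (the identifiers are illustrative):

    import nfldb
    db = nfldb.connect()
    play = nfldb.Play.from_id(db, '2012090500', 1, 36)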
"""
import nfldb.query
q = nfldb.query.Query(db)
q.play(gsis_id=gsis_id, drive_id=drive_id, play_id=play_id).limit(1)
plays = q.as_plays()
if len(plays) == 0:
return None
return plays[0]
@staticmethod
def fill_drives(db, plays):
"""
Given a list of `plays`, fill all of their `drive` attributes
using as few queries as possible. This will also fill the
drives with game data.
"""
_fill(db, Drive, plays, '_drive')
Drive.fill_games(db, [p._drive for p in plays])
def __init__(self, db):
"""
Creates a new and empty `nfldb.Play` object with the given
database connection.
This constructor should not be used by clients. Instead, you
should get `nfldb.Play` objects from `nfldb.Query` or from one
of the other constructors, like `nfldb.Play.from_id` or
`nfldb.Play.from_row_dict`. (The latter is useful only if you're
writing your own SQL queries.)
"""
self._db = db
self._drive = None
self._play_players = None
self.gsis_id = None
"""
The GSIS identifier for the game that this play belongs to.
"""
self.drive_id = None
"""
The numeric drive identifier for this play. It may be
interpreted as a sequence number.
"""
self.play_id = None
"""
The numeric play identifier for this play. It can typically
be interpreted as a sequence number scoped to the week that
this game was played, but it's unfortunately not completely
consistent.
"""
self.time = None
"""
The time on the clock when the play started, represented with
a `nfldb.Clock` object.
"""
self.pos_team = None
"""
The team in possession during this play, represented as
a team abbreviation string. Use the `nfldb.Team` constructor
to get more information on a team.
"""
self.yardline = None
"""
The starting field position of this play represented with
`nfldb.FieldPosition`.
"""
self.down = None
"""
The down on which this play began. This may be `0` for
"special" plays like timeouts or 2 point conversions.
"""
self.yards_to_go = None
"""
The number of yards to go to get a first down or score a
touchdown at the start of the play.
"""
self.description = None
"""
A (basically) free-form text description of the play. This is
typically what you see on NFL GameCenter web pages.
"""
self.note = None
"""
A miscellaneous note field (as a string). Not sure what it's
used for.
"""
self.time_inserted = None
"""
The date and time that this play was added to the
database. This can be very useful when sorting plays by the
order in which they occurred in real time. Unfortunately, such
a sort requires that play data is updated relatively close to
when it actually occurred.
"""
self.time_updated = None
"""The date and time that this play was last updated."""
@property
def drive(self):
"""
The `nfldb.Drive` object that contains this play. The drive is
retrieved from the database if it hasn't been already.
"""
if self._drive is None:
self._drive = Drive.from_id(self._db, self.gsis_id, self.drive_id)
return self._drive
@property
def play_players(self):
"""
A list of all `nfldb.PlayPlayer`s in this play. They are
automatically retrieved from the database if they haven't been
already.
If there are no players attached to this play, then an empty
list is returned.
"""
if self._play_players is None:
import nfldb.query
q = nfldb.query.Query(self._db)
q.play_player(gsis_id=self.gsis_id, drive_id=self.drive_id,
play_id=self.play_id)
self._play_players = q.as_play_players()
for pp in self._play_players:
pp._play = self
return self._play_players
@property
def scoring_team(self):
"""
If this is a scoring play, returns the team that scored points.
Otherwise, returns None.
N.B. `nfldb.Play.scoring_team` returns a valid team if and only
if `nfldb.Play.points` is greater than 0.
"""
for pp in self.play_players:
t = pp.scoring_team
if t is not None:
return t
return None
def score(self, before=False):
"""
Returns the score of the game immediately after this play as a
tuple of the form `(home_score, away_score)`.
If `before` is `True`, then the score will *not* include this
play.
"""
game = Game.from_id(self._db, self.gsis_id)
if not before:
return game.score_at_time(self.time.add_seconds(1))
s = game.score_at_time(self.time)
# The heuristic in `nfldb.Game.score_in_plays` blends TDs and XPs
# into a single play (with respect to scoring). So we have to undo
# that if we want the score of the game after a TD but before an XP.
if self.kicking_xpmade == 1:
score_team = self.scoring_team
if score_team == game.home_team:
return (s[0] - 1, s[1])
return (s[0], s[1] - 1)
return s
def _save(self, cursor):
super(Play, self)._save(cursor)
# Remove any "play players" that are stale.
cursor.execute('''
DELETE FROM play_player
WHERE gsis_id = %s AND drive_id = %s AND play_id = %s
AND NOT (player_id = ANY (%s))
''', (self.gsis_id, self.drive_id, self.play_id,
[p.player_id for p in (self._play_players or [])]))
for pp in (self._play_players or []):
pp._save(cursor)
def __str__(self):
if self.down:
return '(%s, %s, %s, %d and %d) %s' \
% (self.pos_team, self.yardline, self.time.phase,
self.down, self.yards_to_go, self.description)
elif self.pos_team:
return '(%s, %s, %s) %s' \
% (self.pos_team, self.yardline, self.time.phase,
self.description)
else:
return '(%s) %s' % (self.time.phase, self.description)
def __getattr__(self, k):
if k in Play.__slots__:
return 0
raise AttributeError(k)
class SQLDrive (sql.Entity):
__slots__ = []
_sql_tables = {
'primary': ['gsis_id', 'drive_id'],
'managed': ['drive'],
'tables': [
('drive', ['start_field', 'start_time', 'end_field', 'end_time',
'pos_team', 'pos_time', 'first_downs', 'result',
'penalty_yards', 'yards_gained', 'play_count',
'time_inserted', 'time_updated',
]),
],
'derived': [],
}
class Drive (SQLDrive):
"""
Represents a single drive in an NFL game. Each drive has an
assortment of meta data, possibly including the start and end
times, the start and end field positions, the result of the drive,
the number of penalties and first downs, and more.
Each drive corresponds to **zero or more** plays. A drive usually
corresponds to at least one play, but if the game is active, there
exist valid ephemeral states where a drive has no plays.
"""
__slots__ = SQLDrive.sql_fields() + ['_db', '_game', '_plays']
@staticmethod
def _from_nflgame(db, g, d):
"""
Given `g` as a `nfldb.Game` object and `d` as a
`nflgame.game.Drive` object, `_from_nflgame` converts `d` to a
`nfldb.Drive` object.
Generally, this function should not be used. It is called
automatically by `nfldb.Game._from_nflgame`.
"""
dbd = Drive(db)
dbd.gsis_id = g.gsis_id
dbd.drive_id = d.drive_num
dbd.start_time = _nflgame_clock(d.time_start)
dbd.start_field = FieldPosition(getattr(d.field_start, 'offset', None))
dbd.end_field = FieldPosition(d.field_end.offset)
dbd.end_time = _nflgame_clock(d.time_end)
dbd.pos_team = nfldb.team.standard_team(d.team)
dbd.pos_time = PossessionTime(d.pos_time.total_seconds())
dbd.first_downs = d.first_downs
dbd.result = d.result
dbd.penalty_yards = d.penalty_yds
dbd.yards_gained = d.total_yds
dbd.play_count = d.play_cnt
dbd._game = g
candidates = []
for play in d.plays:
candidates.append(Play._from_nflgame(db, dbd, play))
# At this point, some plays don't have valid game times. Fix it!
# If we absolutely cannot fix it, drop the play. Maintain integrity!
dbd._plays = []
for play in candidates:
if play.time is None:
next = _next_play_with(candidates, play, lambda p: p.time)
play.time = _play_time(dbd, play, next)
if play.time is not None:
dbd._plays.append(play)
dbd._plays.sort(key=lambda p: p.play_id)
return dbd
@staticmethod
def from_id(db, gsis_id, drive_id):
"""
Given a GSIS identifier (e.g., `2012090500`) as a string
and an integer drive id, this returns a `nfldb.Drive` object
corresponding to the given identifiers.
If no corresponding drive is found, then `None` is returned.
"""
import nfldb.query
q = nfldb.query.Query(db)
q.drive(gsis_id=gsis_id, drive_id=drive_id).limit(1)
drives = q.as_drives()
if len(drives) == 0:
return None
return drives[0]
@staticmethod
def fill_games(db, drives):
"""
Given a list of `drives`, fill all of their `game` attributes
using as few queries as possible.
"""
_fill(db, Game, drives, '_game')
def __init__(self, db):
"""
Creates a new and empty `nfldb.Drive` object with the given
database connection.
This constructor should not be used by clients. Instead, you
should get `nfldb.Drive` objects from `nfldb.Query` or from one
of the other constructors, like `nfldb.Drive.from_id` or
`nfldb.Drive.from_row_dict`. (The latter is useful only if you're
writing your own SQL queries.)
"""
self._db = db
self._game = None
self._plays = None
self.gsis_id = None
"""
The GSIS identifier for the game that this drive belongs to.
"""
self.drive_id = None
"""
The numeric drive identifier for this drive. It may be
interpreted as a sequence number.
"""
self.start_field = None
"""
The starting field position of this drive represented
with `nfldb.FieldPosition`.
"""
self.start_time = None
"""
The starting clock time of this drive, represented with
`nfldb.Clock`.
"""
self.end_field = None
"""
The ending field position of this drive represented with
`nfldb.FieldPosition`.
"""
self.end_time = None
"""
The ending clock time of this drive, represented with
`nfldb.Clock`.
"""
self.pos_team = None
"""
The team in possession during this drive, represented as
a team abbreviation string. Use the `nfldb.Team` constructor
to get more information on a team.
"""
self.pos_time = None
"""
The possession time of this drive, represented with
`nfldb.PossessionTime`.
"""
self.first_downs = None
"""
The number of first downs that occurred in this drive.
"""
self.result = None
"""
A freeform text field straight from NFL's GameCenter data that
sometimes contains the result of a drive (e.g., `Touchdown`).
"""
self.penalty_yards = None
"""
The number of yards lost or gained from penalties in this
drive.
"""
self.yards_gained = None
"""
The total number of yards gained or lost in this drive.
"""
self.play_count = None
"""
The total number of plays executed by the offense in this
drive.
"""
self.time_inserted = None
"""The date and time that this drive was added."""
self.time_updated = None
"""The date and time that this drive was last updated."""
@property
def game(self):
"""
Returns the `nfldb.Game` object that contains this drive. The
game is retrieved from the database if it hasn't been already.
"""
if self._game is None:
return Game.from_id(self._db, self.gsis_id)
return self._game
@property
def plays(self):
"""
A list of all `nfldb.Play`s in this drive. They are
automatically retrieved from the database if they haven't been
already.
If there are no plays in the drive, then an empty list is
returned.
"""
if self._plays is None:
import nfldb.query
q = nfldb.query.Query(self._db)
q.sort([('time', 'asc'), ('play_id', 'asc')])
q.play(gsis_id=self.gsis_id, drive_id=self.drive_id)
self._plays = q.as_plays()
for p in self._plays:
p._drive = self
return self._plays
def score(self, before=False):
"""
Returns the score of the game immediately after this drive as a
tuple of the form `(home_score, away_score)`.
If `before` is `True`, then the score will *not* include this
drive.
"""
if before:
return self.game.score_at_time(self.start_time)
else:
return self.game.score_at_time(self.end_time)
@property
def play_players(self):
"""
A list of `nfldb.PlayPlayer` objects in this drive. Data is
retrieved from the database if it hasn't been already.
"""
pps = []
for play in self.plays:
for pp in play.play_players:
pps.append(pp)
return pps
def _save(self, cursor):
super(Drive, self)._save(cursor)
if not self._plays:
return
# Remove any plays that are stale.
cursor.execute('''
DELETE FROM play
WHERE gsis_id = %s AND drive_id = %s AND NOT (play_id = ANY (%s))
''', (self.gsis_id, self.drive_id, [p.play_id for p in self._plays]))
for play in (self._plays or []):
play._save(cursor)
def __str__(self):
s = '[%-12s] %-3s from %-6s to %-6s '
s += '(lasted %s - %s to %s)'
return s % (
self.result, self.pos_team, self.start_field, self.end_field,
self.pos_time, self.start_time, self.end_time,
)
class SQLGame (sql.Entity):
__slots__ = []
_sql_tables = {
'primary': ['gsis_id'],
'managed': ['game'],
'tables': [
('game', ['gamekey', 'start_time', 'week', 'day_of_week',
'season_year', 'season_type', 'finished',
'home_team', 'home_score', 'home_score_q1',
'home_score_q2', 'home_score_q3', 'home_score_q4',
'home_score_q5', 'home_turnovers',
'away_team', 'away_score', 'away_score_q1',
'away_score_q2', 'away_score_q3', 'away_score_q4',
'away_score_q5', 'away_turnovers',
'time_inserted', 'time_updated']),
],
'derived': ['winner', 'loser'],
}
@classmethod
def _sql_field(cls, name, aliases=None):
if name in ('winner', 'loser'):
params = ('home_score', 'away_score', 'home_team', 'away_team')
d = dict([(k, cls._sql_field(k, aliases=aliases)) for k in params])
d['cmp'] = '>' if name == 'winner' else '<'
return '''(
CASE WHEN {home_score} {cmp} {away_score} THEN {home_team}
WHEN {away_score} {cmp} {home_score} THEN {away_team}
ELSE ''
END
)'''.format(**d)
else:
return super(SQLGame, cls)._sql_field(name, aliases=aliases)
class Game (SQLGame):
"""
Represents a single NFL game in the preseason, regular season or
post season. Each game has an assortment of meta data, including
a quarterly breakdown of scores, turnovers, the time the game
started, the season week the game occurred in, and more.
Each game corresponds to **zero or more** drives. A game usually
corresponds to at least one drive, but if the game is active, there
exist valid ephemeral states where a game has no drives.
"""
__slots__ = SQLGame.sql_fields() + ['_db', '_drives', '_plays']
# Document instance variables for derived SQL fields.
__pdoc__['Game.winner'] = '''The winner of this game.'''
__pdoc__['Game.loser'] = '''The loser of this game.'''
@staticmethod
def _from_nflgame(db, g):
"""
Converts a `nflgame.game.Game` object to a `nfldb.Game`
object.
`db` should be a psycopg2 connection returned by
`nfldb.connect`.
"""
dbg = Game(db)
dbg.gsis_id = g.eid
dbg.gamekey = g.gamekey
dbg.start_time = _nflgame_start_time(g.schedule)
dbg.week = g.schedule['week']
dbg.day_of_week = Enums._nflgame_game_day[g.schedule['wday']]
dbg.season_year = g.schedule['year']
dbg.season_type = Enums._nflgame_season_phase[g.schedule['season_type']]
dbg.finished = g.game_over()
dbg.home_team = nfldb.team.standard_team(g.home)
dbg.home_score = g.score_home
dbg.home_score_q1 = g.score_home_q1
dbg.home_score_q2 = g.score_home_q2
dbg.home_score_q3 = g.score_home_q3
dbg.home_score_q4 = g.score_home_q4
dbg.home_score_q5 = g.score_home_q5
dbg.home_turnovers = int(g.data['home']['to'])
dbg.away_team = nfldb.team.standard_team(g.away)
dbg.away_score = g.score_away
dbg.away_score_q1 = g.score_away_q1
dbg.away_score_q2 = g.score_away_q2
dbg.away_score_q3 = g.score_away_q3
dbg.away_score_q4 = g.score_away_q4
dbg.away_score_q5 = g.score_away_q5
dbg.away_turnovers = int(g.data['away']['to'])
# If it's been 8 hours since game start, we always conclude finished!
if (now() - dbg.start_time).total_seconds() >= (60 * 60 * 8):
dbg.finished = True
dbg._drives = []
for drive in g.drives:
if not hasattr(drive, 'game'):
continue
dbg._drives.append(Drive._from_nflgame(db, dbg, drive))
dbg._drives.sort(key=lambda d: d.drive_id)
return dbg
@staticmethod
def _from_schedule(db, s):
"""
Converts a schedule dictionary from the `nflgame.schedule`
module to a bare-bones `nfldb.Game` object.
"""
# This is about as evil as it gets. Duck typing to the MAX!
class _Game (object):
def __init__(self):
self.schedule = s
self.home, self.away = s['home'], s['away']
self.eid = s['eid']
self.gamekey = s['gamekey']
self.drives = []
self.game_over = lambda: False
zeroes = ['score_%s', 'score_%s_q1', 'score_%s_q2',
'score_%s_q3', 'score_%s_q4', 'score_%s_q5']
for which, k in itertools.product(('home', 'away'), zeroes):
setattr(self, k % which, 0)
self.data = {'home': {'to': 0}, 'away': {'to': 0}}
return Game._from_nflgame(db, _Game())
@staticmethod
def from_id(db, gsis_id):
"""
Given a GSIS identifier (e.g., `2012090500`) as a string,
returns a `nfldb.Game` object corresponding to `gsis_id`.
If no corresponding game is found, `None` is returned.
"""
import nfldb.query
q = nfldb.query.Query(db)
games = q.game(gsis_id=gsis_id).limit(1).as_games()
if len(games) == 0:
return None
return games[0]
def __init__(self, db):
"""
Creates a new and empty `nfldb.Game` object with the given
database connection.
This constructor should not be used by clients. Instead, you
should get `nfldb.Game` objects from `nfldb.Query` or from one
of the other constructors, like `nfldb.Game.from_id` or
`nfldb.Game.from_row_dict`. (The latter is useful only if you're
writing your own SQL queries.)
"""
self._db = db
"""
The psycopg2 database connection.
"""
self._drives = None
self._plays = None
self.gsis_id = None
"""
The NFL GameCenter id of the game. It is a string
with 10 characters. The first 8 correspond to the date of the
game, while the last 2 correspond to an id unique to the week that
the game was played.
"""
self.gamekey = None
"""
Another unique identifier for a game used by the
NFL. It is a sequence number represented as a 5 character string.
The gamekey is specifically used to tie games to other resources,
like the NFL's content delivery network.
"""
self.start_time = None
"""
A Python datetime object corresponding to the start time of
the game. The timezone of this value will be equivalent to the
timezone specified by `nfldb.set_timezone` (which is by default
set to the value specified in the configuration file).
"""
self.week = None
"""
The week number of this game. It is always relative
to the phase of the season. Namely, the first week of preseason
is 1 and so is the first week of the regular season.
"""
self.day_of_week = None
"""
The day of the week this game was played on.
Possible values correspond to the `nfldb.Enums.game_day` enum.
"""
self.season_year = None
"""
The year of the season of this game. This
does not necessarily match the year that the game was played. For
example, games played in January 2013 are in season 2012.
"""
self.season_type = None
"""
The phase of the season, e.g., `Preseason`,
`Regular season` or `Postseason`. All valid values correspond
to the `nfldb.Enums.season_phase` enum.
"""
self.finished = None
"""
A boolean that is `True` if and only if the game has finished.
"""
self.home_team = None
"""
The team abbreviation for the home team. Use the `nfldb.Team`
constructor to get more information on a team.
"""
self.home_score = None
"""The current total score for the home team."""
self.home_score_q1 = None
"""The 1st quarter score for the home team."""
self.home_score_q2 = None
"""The 2nd quarter score for the home team."""
self.home_score_q3 = None
"""The 3rd quarter score for the home team."""
self.home_score_q4 = None
"""The 4th quarter score for the home team."""
self.home_score_q5 = None
"""The OT quarter score for the home team."""
self.home_turnovers = None
"""Total turnovers for the home team."""
self.away_team = None
"""
The team abbreviation for the away team. Use the `nfldb.Team`
constructor to get more information on a team.
"""
self.away_score = None
"""The current total score for the away team."""
self.away_score_q1 = None
"""The 1st quarter score for the away team."""
self.away_score_q2 = None
"""The 2nd quarter score for the away team."""
self.away_score_q3 = None
"""The 3rd quarter score for the away team."""
self.away_score_q4 = None
"""The 4th quarter score for the away team."""
self.away_score_q5 = None
"""The OT quarter score for the away team."""
self.away_turnovers = None
"""Total turnovers for the away team."""
self.time_inserted = None
"""The date and time that this game was added."""
self.time_updated = None
"""The date and time that this game was last updated."""
self.winner = None
"""The team abbreviation for the winner of this game."""
self.loser = None
"""The team abbreviation for the loser of this game."""
@property
def is_playing(self):
"""
Returns `True` if the game is currently being played and
`False` otherwise.
A game is being played if it is not finished and the current
time is at or past the game's start time.
"""
return not self.finished and now() >= self.start_time
@property
def drives(self):
"""
A list of `nfldb.Drive`s for this game. They are automatically
loaded from the database if they haven't been already.
If there are no drives found in the game, then an empty list
is returned.
"""
if self._drives is None:
import nfldb.query
q = nfldb.query.Query(self._db)
self._drives = q.drive(gsis_id=self.gsis_id).as_drives()
for d in self._drives:
d._game = self
return self._drives
@property
def plays(self):
"""
A list of `nfldb.Play` objects in this game. Data is retrieved
from the database if it hasn't been already.
"""
if self._plays is None:
import nfldb.query
q = nfldb.query.Query(self._db)
q.sort([('time', 'asc'), ('play_id', 'asc')])
self._plays = q.play(gsis_id=self.gsis_id).as_plays()
return self._plays
def plays_range(self, start, end):
"""
Returns a list of `nfldb.Play` objects for this game in the
time range specified. The range corresponds to a half-open
interval, i.e., `[start, end)`. Namely, all plays starting at
or after `start` up to plays starting *before* `end`.
The plays are returned in the order in which they occurred.
`start` and `end` should be instances of the
`nfldb.Clock` class. (Hint: Values can be created with the
`nfldb.Clock.from_str` function.)
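A sketch, assuming the quarter phases are named `Q1` and `Q2`:

    start = Clock.from_str('Q1', '15:00')
    end = Clock.from_str('Q2', '15:00')
    first_quarter_plays = game.plays_range(start, end)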
"""
import nfldb.query as query
q = query.Query(self._db)
q.play(gsis_id=self.gsis_id, time__ge=start, time__lt=end)
q.sort([('time', 'asc'), ('play_id', 'asc')])
return q.as_plays()
def score_in_plays(self, plays):
"""
Returns the scores made by the home and away teams from the
sequence of plays given. The scores are returned as a `(home,
away)` tuple. Note that this method assumes that `plays` is
sorted in the order in which the plays occurred.
"""
# This method is a heuristic to compute the total number of points
# scored in a set of plays. Naively, this should be a simple summation
# of the `points` attribute of each field. However, it seems that
# the JSON feed (where this data comes from) heavily biases toward
# omitting XPs. Therefore, we attempt to add them. A brief outline
# of the heuristic follows.
#
# In *most* cases, a TD is followed by either an XP attempt or a 2 PTC
# attempt by the same team. Therefore, after each TD, we look for the
# next play that fits this criteria, while being careful not to find
# a play that has already counted toward the score. If no play was
# found, then we assume there was an XP attempt and that it was good.
# Otherwise, if a play is found matching the given TD, the point total
# of that play is added to the score.
#
# Note that this relies on the property that every TD is paired with
# an XP/2PTC with respect to the final score of a game. Namely, when
# searching for the XP/2PTC after a TD, it may find a play that came
# after a different TD. But this is OK, so long as we never double
# count any particular play.
def is_twopta(p):
return (p.passing_twopta > 0
or p.receiving_twopta > 0
or p.rushing_twopta > 0)
counted = set() # don't double count
home, away = 0, 0
for i, p in enumerate(plays):
pts = p.points
if pts > 0 and p.play_id not in counted:
counted.add(p.play_id)
if pts == 6:
def after_td(p2):
return (p.pos_team == p2.pos_team
and (p2.kicking_xpa > 0 or is_twopta(p2))
and p2.play_id not in counted)
next = _next_play_with(plays, p, after_td)
if next is None:
pts += 1
elif next.play_id not in counted:
pts += next.points
counted.add(next.play_id)
if p.scoring_team == self.home_team:
home += pts
else:
away += pts
return home, away
def score_at_time(self, time):
"""
Returns the score of the game at the time specified as a
`(home, away)` tuple.
`time` should be an instance of the `nfldb.Clock` class.
(Hint: Values can be created with the `nfldb.Clock.from_str`
function.)
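E.g., the score at the start of the second half (a sketch,
assuming a phase named `Q3`):

    home, away = game.score_at_time(Clock.from_str('Q3', '15:00'))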
"""
start = Clock.from_str('Pregame', '0:00')
return self.score_in_plays(self.plays_range(start, time))
@property
def play_players(self):
"""
A list of `nfldb.PlayPlayer` objects in this game. Data is
retrieved from the database if it hasn't been already.
"""
pps = []
for play in self.plays:
for pp in play.play_players:
pps.append(pp)
return pps
@property
def players(self):
"""
A list of tuples of player data. The first element is the team
the player was on during the game and the second element is a
`nfldb.Player` object corresponding to that player's meta data
(including the team he's currently on). The list is returned
without duplicates and sorted by team and player name.
"""
pset = set()
players = []
for pp in self.play_players:
if pp.player_id not in pset:
players.append((pp.team, pp.player))
pset.add(pp.player_id)
return sorted(players)
def _save(self, cursor):
super(Game, self)._save(cursor)
if not self._drives:
return
# Remove any drives that are stale.
cursor.execute('''
DELETE FROM drive
WHERE gsis_id = %s AND NOT (drive_id = ANY (%s))
''', (self.gsis_id, [d.drive_id for d in self._drives]))
for drive in (self._drives or []):
drive._save(cursor)
def __str__(self):
return '%s %d week %d on %s at %s, %s (%d) at %s (%d)' \
% (self.season_type, self.season_year, self.week,
self.start_time.strftime('%m/%d'),
self.start_time.strftime('%I:%M%p'),
self.away_team, self.away_score,
self.home_team, self.home_score)
| {
"repo_name": "BurntSushi/nfldb",
"path": "nfldb/types.py",
"copies": "1",
"size": "87567",
"license": "unlicense",
"hash": -4909060185579621000,
"line_mean": 34.5385551948,
"line_max": 80,
"alpha_frac": 0.5660808295,
"autogenerated": false,
"ratio": 3.8315830926752428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9897550643076456,
"avg_score": 0.000022655819757269033,
"num_lines": 2464
} |
from __future__ import absolute_import, division, print_function
try:
from specter.psf import load_psf
nospecter = False
except ImportError:
from desiutil.log import get_logger
log = get_logger()
log.error('specter not installed; skipping extraction tests')
nospecter = True
import unittest
import uuid
import os
from glob import glob
from pkg_resources import resource_filename
import desispec.image
import desispec.io
import desispec.scripts.extract
from astropy.io import fits
import numpy as np
class TestExtract(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.testhash = uuid.uuid4()
cls.imgfile = 'test-img-{}.fits'.format(cls.testhash)
cls.outfile = 'test-out-{}.fits'.format(cls.testhash)
cls.outmodel = 'test-model-{}.fits'.format(cls.testhash)
cls.fibermapfile = 'test-fibermap-{}.fits'.format(cls.testhash)
cls.psffile = resource_filename('specter', 'test/t/psf-monospot.fits')
# cls.psf = load_psf(cls.psffile)
pix = np.random.normal(0, 3.0, size=(400,400))
ivar = np.ones_like(pix) / 3.0**2
mask = np.zeros(pix.shape, dtype=np.uint32)
mask[200] = 1
img = desispec.image.Image(pix, ivar, mask, camera='z0')
desispec.io.write_image(cls.imgfile, img, meta=dict(flavor='science'))
fibermap = desispec.io.empty_fibermap(100)
desispec.io.write_fibermap(cls.fibermapfile, fibermap)
def setUp(self):
for filename in (self.outfile, self.outmodel):
if os.path.exists(filename):
os.remove(filename)
@classmethod
def tearDownClass(cls):
for filename in glob('test-*{}*.fits'.format(cls.testhash)):
if os.path.exists(filename):
os.remove(filename)
@unittest.skipIf(nospecter, 'specter not installed; skipping extraction test')
def test_extract(self):
template = "desi_extract_spectra -i {} -p {} -w 7500,7600,0.75 -f {} -s 0 -n 3 -o {} -m {}"
cmd = template.format(self.imgfile, self.psffile, self.fibermapfile, self.outfile, self.outmodel)
opts = cmd.split(" ")[1:]
args = desispec.scripts.extract.parse(opts)
desispec.scripts.extract.main(args)
self.assertTrue(os.path.exists(self.outfile))
frame1 = desispec.io.read_frame(self.outfile)
model1 = fits.getdata(self.outmodel)
os.remove(self.outfile)
os.remove(self.outmodel)
desispec.scripts.extract.main_mpi(args, comm=None)
self.assertTrue(os.path.exists(self.outfile))
frame2 = desispec.io.read_frame(self.outfile)
model2 = fits.getdata(self.outmodel)
self.assertTrue(np.all(frame1.flux[0:3] == frame2.flux[0:3]))
self.assertTrue(np.all(frame1.ivar[0:3] == frame2.ivar[0:3]))
self.assertTrue(np.all(frame1.mask[0:3] == frame2.mask[0:3]))
self.assertTrue(np.all(frame1.chi2pix[0:3] == frame2.chi2pix[0:3]))
self.assertTrue(np.all(frame1.resolution_data[0:3] == frame2.resolution_data[0:3]))
#- These agree at the level of 1e-11 but not 1e-15. Why not?
#- We'll open a separate ticket about that, but allow to pass for now
### self.assertTrue(np.allclose(model1, model2, rtol=1e-15, atol=1e-15))
self.assertTrue(np.allclose(model1, model2, rtol=1e-11, atol=1e-11))
#- Check that units made it into the file
self.assertEqual(frame1.meta['BUNIT'], 'electron/Angstrom')
self.assertEqual(frame2.meta['BUNIT'], 'electron/Angstrom')
def test_boxcar(self):
from desispec.quicklook.qlboxcar import do_boxcar
from desispec.io import read_xytraceset
#psf = load_psf(self.psffile)
tset = read_xytraceset(self.psffile)
pix = np.random.normal(0, 3.0, size=(tset.npix_y, tset.npix_y))
ivar = np.ones_like(pix) / 3.0**2
mask = np.zeros(pix.shape, dtype=np.uint32)
img = desispec.image.Image(pix, ivar, mask, camera='z0')
outwave = np.arange(7500, 7600)
nwave = len(outwave)
nspec = 5
flux, ivar, resolution = do_boxcar(img, tset, outwave, boxwidth=2.5, nspec=nspec)
self.assertEqual(flux.shape, (nspec, nwave))
self.assertEqual(ivar.shape, (nspec, nwave))
self.assertEqual(resolution.shape[0], nspec)
# resolution.shape[1] is number of diagonals; picked by algorithm
self.assertEqual(resolution.shape[2], nwave)
def _test_bundles(self, template, specmin, nspec):
#- should also work with bundles and not starting at spectrum 0
cmd = template.format(self.imgfile, self.psffile, self.fibermapfile,
self.outfile, self.outmodel, specmin, nspec)
opts = cmd.split(" ")[1:]
args = desispec.scripts.extract.parse(opts)
desispec.scripts.extract.main(args)
self.assertTrue(os.path.exists(self.outfile))
frame1 = desispec.io.read_frame(self.outfile)
model1 = fits.getdata(self.outmodel)
os.remove(self.outfile)
os.remove(self.outmodel)
desispec.scripts.extract.main_mpi(args, comm=None)
self.assertTrue(os.path.exists(self.outfile))
frame2 = desispec.io.read_frame(self.outfile)
model2 = fits.getdata(self.outmodel)
errmsg = f'for specmin={specmin}, nspec={nspec}'
self.assertTrue(np.all(frame1.flux == frame2.flux), errmsg)
self.assertTrue(np.all(frame1.ivar == frame2.ivar), errmsg)
self.assertTrue(np.all(frame1.mask == frame2.mask), errmsg)
self.assertTrue(np.all(frame1.chi2pix == frame2.chi2pix), errmsg)
self.assertTrue(np.all(frame1.resolution_data == frame2.resolution_data),errmsg)
#- pixel model isn't valid for small bundles that actually overlap; don't test
# self.assertTrue(np.allclose(model1, model2, rtol=1e-15, atol=1e-15))
#- traditional and MPI versions agree when starting at spectrum 0
def test_bundles1(self):
self._test_bundles("desi_extract_spectra -i {} -p {} -w 7500,7530,0.75 --nwavestep 10 -f {} --bundlesize 3 -o {} -m {} -s {} -n {}", 0, 5)
#- test starting at a bundle non-boundary
def test_bundles2(self):
self._test_bundles("desi_extract_spectra -i {} -p {} -w 7500,7530,0.75 --nwavestep 10 -f {} --bundlesize 3 -o {} -m {} -s {} -n {}", 2, 5)
def test_bundles3(self):
self._test_bundles("desi_extract_spectra -i {} -p {} -w 7500,7530,0.75 --nwavestep 10 -f {} --bundlesize 3 -o {} -m {} -s {} -n {}", 22, 5)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "desihub/desispec",
"path": "py/desispec/test/test_extract.py",
"copies": "1",
"size": "6596",
"license": "bsd-3-clause",
"hash": 6476238949012834000,
"line_mean": 41.2820512821,
"line_max": 147,
"alpha_frac": 0.6400848999,
"autogenerated": false,
"ratio": 3.1499522445081185,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9274769460023585,
"avg_score": 0.003053536876906673,
"num_lines": 156
} |
from __future__ import absolute_import, division, print_function
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import greplin.scales
import greplin.scales.formats
import greplin.scales.util
def scales_stats(request):
parts = request.matchdict.get('prefix')
path = '/'.join(parts)
stats = greplin.scales.util.lookup(greplin.scales.getStats(), parts)
output = StringIO()
outputFormat = request.params.get('format', 'html')
query = request.params.get('query', None)
if outputFormat == 'json':
request.response.content_type = 'application/json'
greplin.scales.formats.jsonFormat(output, stats, query)
elif outputFormat == 'prettyjson':
request.response.content_type = 'application/json'
greplin.scales.formats.jsonFormat(output, stats, query, pretty=True)
else:
request.response.content_type = 'text/html'
# XXX Dear pyramid.renderers.string_renderer_factory,
# you can't be serious
request.response.default_content_type = 'not-text/html'
output.write('<html>')
greplin.scales.formats.htmlHeader(output, '/' + path, __name__, query)
greplin.scales.formats.htmlFormat(output, tuple(parts), stats, query)
output.write('</html>')
return output.getvalue()
def includeme(config):
config.add_route('scales', '/scales/*prefix')
config.add_view(scales_stats, route_name='scales', renderer='string')
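# Hypothetical usage sketch: after `config.include('pyramid_scales')`, the
# route above serves /scales/ as HTML, and /scales/some/prefix?format=json
# (or format=prettyjson) as JSON, per the format handling in scales_stats.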
| {
"repo_name": "wosc/pyramid_scales",
"path": "src/pyramid_scales/__init__.py",
"copies": "1",
"size": "1480",
"license": "bsd-3-clause",
"hash": 780143237284034700,
"line_mean": 34.2380952381,
"line_max": 78,
"alpha_frac": 0.6844594595,
"autogenerated": false,
"ratio": 3.8046272493573263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49890867088573265,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
_thread_locals = local()
def get_current_request():
""" returns the request object for this thread """
return getattr(_thread_locals, "request", None)
def get_current_user():
""" returns the current user, if exist, otherwise returns None """
request = get_current_request()
if request:
return getattr(request, "user", None)
class ThreadLocalMiddleware(object):
""" Simple middleware that adds the request object in thread local storage."""
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
_thread_locals.request = request
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
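# Sketch of intended use (module path per this project's layout; the
# settings fragment is an assumption, not part of this file):
#
#     # settings.py
#     MIDDLEWARE = [
#         # ...
#         'Piscix.middleware.ThreadLocalMiddleware',
#     ]
#
#     # anywhere during request handling
#     user = get_current_user()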
| {
"repo_name": "exildev/Piscix",
"path": "Piscix/middleware.py",
"copies": "2",
"size": "1131",
"license": "mit",
"hash": -1492557289073657600,
"line_mean": 27.275,
"line_max": 82,
"alpha_frac": 0.6746242263,
"autogenerated": false,
"ratio": 4.470355731225297,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00030120481927710846,
"num_lines": 40
} |
from __future__ import absolute_import, division, print_function
try:
import bpython # noqa
# Access a property to verify module exists in case
# there's a demand loader wrapping module imports
# See https://github.com/inducer/pudb/issues/177
bpython.__version__
except ImportError:
HAVE_BPYTHON = False
else:
HAVE_BPYTHON = True
try:
from prompt_toolkit.contrib.repl import embed as ptpython_embed
except ImportError:
HAVE_PTPYTHON = False
else:
HAVE_PTPYTHON = True
try:
import readline
import rlcompleter
HAVE_READLINE = True
except ImportError:
HAVE_READLINE = False
# {{{ combined locals/globals dict
class SetPropagatingDict(dict):
def __init__(self, source_dicts, target_dict):
dict.__init__(self)
for s in source_dicts[::-1]:
self.update(s)
self.target_dict = target_dict
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.target_dict[key] = value
def __delitem__(self, key):
dict.__delitem__(self, key)
del self.target_dict[key]
# }}}
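# A minimal illustration of SetPropagatingDict (an added example, not part
# of the original module): lookups see every source dict, writes propagate
# to the target dict.
#
#     locals_, globals_ = {'x': 1}, {'y': 2}
#     ns = SetPropagatingDict([locals_, globals_], locals_)
#     assert ns['x'] == 1 and ns['y'] == 2
#     ns['z'] = 3  # also sets locals_['z'] = 3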
def run_classic_shell(locals, globals, first_time):
if first_time:
banner = "Hit Ctrl-D to return to PuDB."
else:
banner = ""
ns = SetPropagatingDict([locals, globals], locals)
from pudb.settings import get_save_config_path
from os.path import join
hist_file = join(
get_save_config_path(),
"shell-history")
if HAVE_READLINE:
readline.set_completer(
rlcompleter.Completer(ns).complete)
readline.parse_and_bind("tab: complete")
try:
readline.read_history_file(hist_file)
except IOError:
pass
from code import InteractiveConsole
cons = InteractiveConsole(ns)
cons.interact(banner)
if HAVE_READLINE:
readline.write_history_file(hist_file)
def run_bpython_shell(locals, globals, first_time):
ns = SetPropagatingDict([locals, globals], locals)
import bpython.cli
bpython.cli.main(args=[], locals_=ns)
# {{{ ipython
def have_ipython():
# IPython has started being obnoxious on import, only import
# if absolutely needed.
# https://github.com/ipython/ipython/issues/9435
try:
import IPython
# Access a property to verify module exists in case
# there's a demand loader wrapping module imports
# See https://github.com/inducer/pudb/issues/177
IPython.core
except (ImportError, ValueError):
# Old IPythons versions (0.12?) may fail to import with
# ValueError: fallback required, but not specified
# https://github.com/inducer/pudb/pull/135
return False
else:
return True
def ipython_version():
if have_ipython():
from IPython import version_info
return version_info
else:
return None
def run_ipython_shell_v10(locals, globals, first_time):
'''IPython shell from IPython version 0.10'''
if first_time:
banner = "Hit Ctrl-D to return to PuDB."
else:
banner = ""
# avoid IPython's namespace litter
ns = locals.copy()
from IPython.Shell import IPShell
IPShell(argv=[], user_ns=ns, user_global_ns=globals) \
.mainloop(banner=banner)
def _update_ipython_ns(shell, locals, globals):
'''Update the IPython 0.11 namespace at every visit'''
shell.user_ns = locals.copy()
try:
shell.user_global_ns = globals
except AttributeError:
class DummyMod(object):
"A dummy module used for IPython's interactive namespace."
pass
user_module = DummyMod()
user_module.__dict__ = globals
shell.user_module = user_module
shell.init_user_ns()
shell.init_completer()
def run_ipython_shell_v11(locals, globals, first_time):
'''IPython shell from IPython version 0.11'''
if first_time:
banner = "Hit Ctrl-D to return to PuDB."
else:
banner = ""
try:
# IPython 1.0 got rid of the frontend intermediary, and complains with
# a deprecated warning when you use it.
from IPython.terminal.interactiveshell import TerminalInteractiveShell
from IPython.terminal.ipapp import load_default_config
except ImportError:
from IPython.frontend.terminal.interactiveshell import \
TerminalInteractiveShell
from IPython.frontend.terminal.ipapp import load_default_config
# XXX: in the future it could be useful to load a 'pudb' config for the
# user (if it exists) that could contain the user's macros and other
# niceties.
config = load_default_config()
shell = TerminalInteractiveShell.instance(config=config,
banner2=banner)
# XXX This avoids a warning about not having unique session/line numbers.
# See the HistoryManager.writeout_cache method in IPython.core.history.
shell.history_manager.new_session()
# Save the originating namespace
old_locals = shell.user_ns
old_globals = shell.user_global_ns
# Update shell with current namespace
_update_ipython_ns(shell, locals, globals)
args = []
if ipython_version() < (5, 0, 0):
args.append(banner)
else:
print(banner)
shell.mainloop(*args)
# Restore originating namespace
_update_ipython_ns(shell, old_locals, old_globals)
def run_ipython_shell(locals, globals, first_time):
import IPython
if have_ipython() and hasattr(IPython, 'Shell'):
return run_ipython_shell_v10(locals, globals, first_time)
else:
return run_ipython_shell_v11(locals, globals, first_time)
# }}}
def run_ptpython_shell(locals, globals, first_time):
# Use the default ptpython history
import os
history_filename = os.path.expanduser('~/.ptpython_history')
ptpython_embed(globals.copy(), locals.copy(),
history_filename=history_filename)
# vim: foldmethod=marker
| {
"repo_name": "albfan/pudb",
"path": "pudb/shell.py",
"copies": "1",
"size": "6010",
"license": "mit",
"hash": -7321483349218338000,
"line_mean": 26.6958525346,
"line_max": 78,
"alpha_frac": 0.6499168053,
"autogenerated": false,
"ratio": 3.8950097213220998,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50449265266221,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
try:
import cPickle as pickle
except ImportError:
import pickle
from . import serialize
import tempfile
try:
from cytoolz import groupby, take, concat, curry
except ImportError:
from toolz import groupby, take, concat, curry
import os
import shutil
import psutil
import random
try:
    from collections.abc import Iterator, Iterable  # Python 3.3+
except ImportError:  # Python 2
    from collections import Iterator, Iterable
import dill
global_chunksize = [100]
class PBag(object):
""" Partitioned, on-disk, Bag
TODO: Find better name
A PBag partitions and stores a sequence on disk.
It assigns a group to each element of the input and stores batches of
similarly grouped inputs to a file on disk. It does this in a streaming
way to enable the partitioning of large sequences on disk.
It also enables the extraction of any one of those groups.
>>> pb = PBag(grouper=lambda x: x[0], npartitions=10)
>>> pb.extend([[0, 'Alice', 100], [1, 'Bob', 200], [0, 'Charlie', 300]])
>>> pb.get_partition(0)
[[0, 'Alice', 100], [0, 'Charlie', 300]]
"""
def __init__(self, grouper, npartitions, path=None, open=open,
dump=serialize.dump, load=serialize.load):
# dump=curry(pickle.dump, protocol=pickle.HIGHEST_PROTOCOL),
# load=pickle.load
self.grouper = grouper
if path is None:
self.path = tempfile.mkdtemp('.pbag')
self._explicitly_given_path = False
else:
self.path = path
if not os.path.exists(path):
os.mkdir(self.path)
self._explicitly_given_path = True
self.npartitions = npartitions
self.open = open
self.isopen = False
self.dump = dump
self.load = load
self.filenames = [os.path.join(self.path, '%d.part' % i)
for i in range(self.npartitions)]
def _open_files(self):
if not self.isopen:
self.isopen = True
self.files = [self.open(fn, 'ab') for fn in self.filenames]
def _close_files(self):
if self.isopen:
for f in self.files:
f.close()
self.files = []
self.isopen = False
def __enter__(self):
self._open_files()
return self
def __exit__(self, dType, eValue, eTrace):
self._close_files()
def partition_of(self, item):
return hash(self.grouper(item)) % self.npartitions
def extend_chunk(self, seq):
self._open_files()
grouper = self.grouper
npart = self.npartitions
groups = groupby(grouper, seq)
# Unify groups that hash the same
groups2 = dict()
for k, v in groups.items():
key = hash(k) % self.npartitions
if key not in groups2:
groups2[key] = []
groups2[key].extend(v)
# Store to disk
for k, group in groups2.items():
if group:
self.dump(group, self.files[k])
def extend(self, seq):
if isinstance(seq, Iterator):
# psutil.avail_phymem() was removed in psutil 2.0;
# virtual_memory().available is the modern equivalent.
start_available_memory = psutil.virtual_memory().available
# Two bounds to avoid hysteresis
target_low = 0.4 * start_available_memory
target_high = 0.6 * start_available_memory
# Pull chunksize from last run
chunksize = global_chunksize[0]
empty = False
while not empty:
chunk = tuple(take(chunksize, seq))
self.extend_chunk(chunk)
# tweak chunksize if necessary
available_memory = psutil.virtual_memory().available
if len(chunk) == chunksize:
if available_memory > target_high:
chunksize = int(chunksize * 1.6)
elif available_memory < target_low:
chunksize = int(chunksize / 1.6)
empty, seq = isempty(seq)
global_chunksize[0] = chunksize
else:
self.extend_chunk(seq)
def get_partition(self, i):
self._close_files()
with self.open(self.filenames[i], mode='rb') as f:
segments = []
while True:
try:
segments.append(self.load(f))
except (EOFError, IOError):
break
if not segments:
return segments
return sum(segments, type(segments[0])())
def __del__(self):
self._close_files()
if not self._explicitly_given_path:
self.drop()
def drop(self):
shutil.rmtree(self.path)
def __getstate__(self):
return dill.dumps(self.__dict__)
def __setstate__(self, state):
self.__dict__.update(dill.loads(state))
def partition_all(n, seq):
""" Take chunks from the sequence, n elements at a time
>>> parts = partition_all(3, [1, 2, 3, 4, 5, 6, 7, 8])
>>> for part in parts:
... print(tuple(part))
(1, 2, 3)
(4, 5, 6)
(7, 8)
The results are themselves lazy and so must be evaluated entirely before
the next block is requested
"""
seq = iter(seq)
stop, seq = isempty(seq)
while not stop:
yield take(n, seq)
stop, seq = isempty(seq)
def isempty(seq):
""" Is the sequence empty?
>>> seq = iter([1, 2, 3])
>>> empty, seq = isempty(seq)
>>> empty
False
>>> list(seq) # seq is preserved
[1, 2, 3]
>>> seq = iter([])
>>> empty, seq = isempty(seq)
>>> empty
True
"""
try:
first = next(seq)
return False, concat([[first], seq])
except StopIteration:
return True, False
| {
"repo_name": "esc/dask",
"path": "pbag/core.py",
"copies": "4",
"size": "5711",
"license": "bsd-3-clause",
"hash": 2685155293268708000,
"line_mean": 27.2722772277,
"line_max": 77,
"alpha_frac": 0.5506916477,
"autogenerated": false,
"ratio": 3.957726957726958,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00048180996759725593,
"num_lines": 202
} |
from __future__ import (absolute_import, division, print_function)
try:
import cPickle as pickle
except ImportError:
import pickle
import gzip
import os
import struct
import numpy as np
xkey, ykey = 'times_cpu', 'rmsd_over_atol'
def read():
basename = os.path.splitext(os.path.basename(__file__))[0]
assert basename.startswith('plot_')
basename = basename[len('plot_'):]
source = os.path.join(os.path.dirname(__file__), basename + '.pkl')
if not os.path.exists(source):
raise IOError("%s does not exist. Run rmsd_vs_texec.py first" % source)
results = pickle.load(gzip.open(source, 'rb'))
varied_keys = results.pop('varied_keys')
varied_vals = results.pop('varied_values')
if len(varied_keys) != 3 or len(varied_vals) != 3:
raise ValueError("Script assumes 3 parameters (hue, tone, marker)")
base_colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1),
(1, 1, 0), (1, 0, 1), (0, 1, 1)]
def _color(params):
hue_val, tone_val = params[:2]
hue_vals, tone_vals = varied_vals[:2]
tone = 1/4. + 3*tone_val/float(tone_vals[-1])/4.
hue = base_colors[hue_vals.index(hue_val)]
color = tuple(np.asarray(np.round(255*np.array(hue)*tone), dtype=int))
# avoid bytes.encode('hex'), which only exists on Python 2
return '#%02x%02x%02x' % tuple(int(c) for c in color)
for k, v in results.items():
results[k]['color'] = _color(k)
return results, varied_keys, varied_vals
def get_bokeh_fig():
from bokeh.plotting import Figure # , gridplot
from bokeh.models import ColumnDataSource, HoverTool
results, varied_keys, varied_vals = read()
include_keys = varied_keys + [
'nfev', 'njev', 'nprec_setup', 'nprec_solve', 'njacvec_dot',
'nprec_solve_ilu', 'nprec_solve_lu', "n_steps", "n_rhs_evals",
"n_lin_solv_setups", "n_err_test_fails", "n_nonlin_solv_iters",
"n_nonlin_solv_conv_fails", "krylov_n_lin_iters",
"krylov_n_prec_evals", "krylov_n_prec_solves", "krylov_n_conv_fails",
"krylov_n_jac_times_evals", "krylov_n_iter_rhs"
]
cols = [xkey, ykey, 'color'] + include_keys
sources = {}
varied3 = varied_vals[2]
keys = list(results.keys())
vals = list(results.values())
for val in varied3:
sources[val] = ColumnDataSource(data={k: [] for k in cols})
for k in cols:
sources[val].data[k] = [vals[idx].get(k, None) for idx in
range(len(vals)) if keys[idx][2] == val]
hover = HoverTool(tooltips=[(k, '@'+k) for k in include_keys])
top = Figure(
plot_height=600, plot_width=800, title="%s vs. %s" % (ykey, xkey),
x_axis_type="linear", y_axis_type="log", tools=[
hover, 'pan', 'reset', 'box_zoom', 'wheel_zoom', 'save'])
top.xaxis.axis_label = xkey
top.yaxis.axis_label = ykey
for source, marker in zip(sources.values(), ['circle', 'diamond']):
top.scatter(x=xkey, y=ykey, source=source, size=9, color="color",
line_color=None, marker=marker)
return top
def plot_with_matplotlib(savefig='none', dpi=300, errorbar=False, linx=False,
liny=False):
import matplotlib.pyplot as plt
results, varied_keys, varied_vals = read()
plt.rc('font', family='serif')
def label(data):
if data[varied_keys[1]] == varied_vals[1][-1]:
meth = data['method']
meth = meth.replace('bdf', r'$\mathrm{BDF}$')
meth = meth.replace('adams', r'$\mathrm{Adams}$')
return '$%d$, %s' % (data['nstencil'], meth)
for params, data in results.items():
mrkr = 'od'[varied_vals[2].index(params[2])]
if errorbar:
cb, kwargs = plt.errorbar, dict(xerr=2*data['d'+xkey])
else:
cb, kwargs = plt.plot, {}
cb(data[xkey], data[ykey], marker=mrkr, color=data['color'],
label=label(data), ls='None', **kwargs)
ax = plt.gca()
if not linx:
ax.set_xscale('log')
if not liny:
ax.set_yscale('log')
plt.xlabel('$t_{exec}$')
plt.ylabel(r'$\mathrm{RMSD}\ /\ \mathrm{atol}$')
handles, labels = ax.get_legend_handles_labels()
# reverse the order
ax.legend(handles[::-1], labels[::-1])
# or sort them by labels
import operator
hl = sorted(zip(handles, labels),
key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, numpoints=1, prop={'size': 12})
if savefig.lower() == 'none':
plt.show()
else:
plt.savefig(savefig, dpi=dpi)
if __name__.startswith('bk_'):
# e.g:
#
# $ bokeh html plot_rmsd_vs_texec.py
# $ bokeh serve plot_rmsd_vs_texec.py
from bokeh.io import curdoc
curdoc().add_root(get_bokeh_fig())
if __name__ == '__main__':
# e.g:
#
# $ python plot_rmsd_vs_texec.py
# $ python plot_rmsd_vs_texec.py --savefig figure.png --dpi 100
import argh
argh.dispatch_command(plot_with_matplotlib)
| {
"repo_name": "bjodah/chemreac",
"path": "examples/plot_rmsd_vs_texec.py",
"copies": "2",
"size": "5000",
"license": "bsd-2-clause",
"hash": -7133027400877302000,
"line_mean": 34.2112676056,
"line_max": 79,
"alpha_frac": 0.5804,
"autogenerated": false,
"ratio": 3.0525030525030523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46329030525030523,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
try:
import flask
from flask import Flask, request
except ImportError:
pass
import blaze
import socket
import json
from toolz import assoc
from functools import partial, wraps
from blaze import into, compute
from blaze.expr import utils as expr_utils
from blaze.compute import compute_up
from datashape.predicates import iscollection, isscalar
from ..interactive import InteractiveSymbol, coerce_scalar
from ..utils import json_dumps
from ..expr import Expr, symbol
from datashape import Mono, discover
__all__ = 'Server', 'to_tree', 'from_tree'
# http://www.speedguide.net/port.php?port=6363
# http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers
DEFAULT_PORT = 6363
class Server(object):
""" Blaze Data Server
Host local data through a web API
Parameters
----------
data : ``dict`` or ``None``, optional
A dictionary mapping dataset name to any data format that blaze
understands.
Examples
--------
>>> from pandas import DataFrame
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Alice', 300],
... [4, 'Dennis', 400],
... [5, 'Bob', -500]],
... columns=['id', 'name', 'amount'])
>>> server = Server({'accounts': df})
>>> server.run() # doctest: +SKIP
"""
__slots__ = 'app', 'data', 'port'
def __init__(self, data=None):
app = self.app = Flask('blaze.server.server')
if data is None:
data = dict()
self.data = data
for args, kwargs, func in routes:
func2 = wraps(func)(partial(func, self.data))
app.route(*args, **kwargs)(func2)
def run(self, *args, **kwargs):
"""Run the server"""
port = kwargs.pop('port', DEFAULT_PORT)
self.port = port
try:
self.app.run(*args, port=port, **kwargs)
except socket.error:
print("\tOops, couldn't connect on port %d. Is it busy?" % port)
self.run(*args, **assoc(kwargs, 'port', port + 1))
routes = list()
def route(*args, **kwargs):
def f(func):
routes.append((args, kwargs, func))
return func
return f
@route('/datashape')
def dataset(data):
return str(discover(data))
def to_tree(expr, names=None):
""" Represent Blaze expression with core data structures
Transform a Blaze expression into a form using only strings, dicts, lists
and base types (int, float, datetime, ...). This form can be useful for
serialization.
Parameters
----------
expr: Blaze Expression
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> to_tree(t) # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> to_tree(t.x.sum()) # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Column',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
Simplify the expression using an explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Column', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
blaze.server.server.from_tree
"""
if names and expr in names:
return names[expr]
if isinstance(expr, tuple):
return [to_tree(arg, names=names) for arg in expr]
if isinstance(expr, expr_utils._slice):
return to_tree(expr.as_slice(), names=names)
if isinstance(expr, slice):
return {'op': 'slice',
'args': [to_tree(arg, names=names) for arg in
[expr.start, expr.stop, expr.step]]}
elif isinstance(expr, Mono):
return str(expr)
elif isinstance(expr, InteractiveSymbol):
return to_tree(symbol(expr._name, expr.dshape), names)
elif isinstance(expr, Expr):
return {'op': type(expr).__name__,
'args': [to_tree(arg, names) for arg in expr._args]}
else:
return expr
def expression_from_name(name):
"""
>>> expression_from_name('By')
<class 'blaze.expr.split_apply_combine.By'>
>>> expression_from_name('And')
<class 'blaze.expr.arithmetic.And'>
"""
import blaze
if hasattr(blaze, name):
return getattr(blaze, name)
if hasattr(blaze.expr, name):
return getattr(blaze.expr, name)
for signature, func in compute_up.funcs.items():
try:
if signature[0].__name__ == name:
return signature[0]
except TypeError:
pass
raise ValueError('%s not found in compute_up' % name)
def from_tree(expr, namespace=None):
""" Convert core data structures to Blaze expression
Core data structure representations created by ``to_tree`` are converted
back into Blaze expressions.
Parameters
----------
expr : dict
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> tree = to_tree(t)
>>> tree # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> from_tree(tree)
t
>>> tree = to_tree(t.x.sum())
>>> tree # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Field',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
>>> from_tree(tree)
sum(t.x)
Simplify the expression using an explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Field', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
blaze.server.server.to_tree
"""
if isinstance(expr, dict):
op, args = expr['op'], expr['args']
if 'slice' == op:
return expr_utils._slice(*[from_tree(arg, namespace)
for arg in args])
if hasattr(blaze.expr, op):
cls = getattr(blaze.expr, op)
else:
cls = expression_from_name(op)
if 'Symbol' in op:
children = [from_tree(arg) for arg in args]
else:
children = [from_tree(arg, namespace) for arg in args]
return cls(*children)
elif isinstance(expr, list):
return tuple(from_tree(arg, namespace) for arg in expr)
if namespace and expr in namespace:
return namespace[expr]
else:
return expr
@route('/compute.json', methods=['POST', 'PUT', 'GET'])
def compserver(dataset):
if request.headers['content-type'] != 'application/json':
return ("Expected JSON data", 404)
try:
payload = json.loads(request.data.decode('utf-8'))
except ValueError:
return ("Bad JSON. Got %s " % request.data, 404)
ns = payload.get('namespace', dict())
ns[':leaf'] = symbol('leaf', discover(dataset))
expr = from_tree(payload['expr'], namespace=ns)
assert len(expr._leaves()) == 1
leaf = expr._leaves()[0]
try:
result = compute(expr, {leaf: dataset})
except Exception as e:
return ("Computation failed with message:\n%s" % e, 500)
if iscollection(expr.dshape):
result = into(list, result)
elif isscalar(expr.dshape):
result = coerce_scalar(result, str(expr.dshape))
return json.dumps({'datashape': str(expr.dshape),
'data': result}, default=json_dumps)
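# Hedged client-side sketch (editor addition): a compute.json request
# carries a serialized expression tree; the host, port, and dataset name
# below are illustrative and a running server is assumed.
#
# import requests
# tree = to_tree(symbol('accounts', 'var * {amount: int64}').amount.sum())
# requests.post('http://localhost:6363/compute.json',
#               data=json.dumps({'expr': tree}),
#               headers={'Content-Type': 'application/json'})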
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/server/server.py",
"copies": "1",
"size": "7939",
"license": "bsd-3-clause",
"hash": -1490804352162555100,
"line_mean": 26.2817869416,
"line_max": 77,
"alpha_frac": 0.5470462275,
"autogenerated": false,
"ratio": 3.668669131238447,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4715715358738447,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
try:
import flask
from flask import json
import requests
except ImportError:
pass
from odo import resource
from datashape import dshape
from ..expr import Expr
from ..dispatch import dispatch
from .server import DEFAULT_PORT
# These are a hack for testing
# It's convenient to use requests for live production but use
# flask for testing. Sadly they have different Response objects,
# hence the dispatched functions
__all__ = 'Client', 'ExprClient'
def content(response):
if isinstance(response, flask.Response):
return response.data
if isinstance(response, requests.Response):
return response.content
def ok(response):
if isinstance(response, flask.Response):
return 'OK' in response.status
if isinstance(response, requests.Response):
return response.ok
def reason(response):
if isinstance(response, flask.Response):
return response.status
if isinstance(response, requests.Response):
return response.reason
class Client(object):
""" Client for Blaze Server
Provides programmatic access to datasets living on Blaze Server
Parameters
----------
url : str
URL of a Blaze server
Examples
--------
>>> # This example matches with the docstring of ``Server``
>>> from blaze import Data
>>> c = Client('localhost:6363')
>>> t = Data(c) # doctest: +SKIP
See Also
--------
blaze.server.server.Server
"""
__slots__ = 'url'
def __init__(self, url, **kwargs):
url = url.strip('/')
if not url.startswith('http'):
url = 'http://' + url
self.url = url
@property
def dshape(self):
"""The datashape of the client"""
response = requests.get('%s/datashape' % self.url)
if not ok(response):
raise ValueError("Bad Response: %s" % reason(response))
return dshape(content(response).decode('utf-8'))
def ExprClient(*args, **kwargs):
import warnings
warnings.warn("Deprecated use `Client` instead", DeprecationWarning)
return Client(*args, **kwargs)
@dispatch(Client)
def discover(c):
return c.dshape
@dispatch(Expr, Client)
def compute_down(expr, ec, **kwargs):
from .server import to_tree
tree = to_tree(expr)
r = requests.get('%s/compute.json' % ec.url,
data=json.dumps({'expr': tree}),
headers={'Content-Type': 'application/json'})
if not ok(r):
raise ValueError("Bad response: %s" % reason(r))
data = json.loads(content(r).decode('utf-8'))
return data['data']
@resource.register('blaze://.+')
def resource_blaze(uri, **kwargs):
uri = uri[len('blaze://'):]
sp = uri.split('/')
tld, rest = sp[0], sp[1:]
if ':' not in tld:
tld = tld + ':%d' % DEFAULT_PORT
uri = '/'.join([tld] + list(rest))
return Client(uri)
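# Hedged usage sketch (editor addition): 'blaze://' URIs default to port
# 6363 when none is given; the hostname below is illustrative.
if __name__ == '__main__':
    c = resource_blaze('blaze://localhost/accounts')
    assert c.url == 'http://localhost:6363/accounts'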
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/server/client.py",
"copies": "1",
"size": "2962",
"license": "bsd-3-clause",
"hash": 8022446243667642000,
"line_mean": 22.3228346457,
"line_max": 72,
"alpha_frac": 0.6191762323,
"autogenerated": false,
"ratio": 3.8769633507853403,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.999613958308534,
"avg_score": 0,
"num_lines": 127
} |
from __future__ import absolute_import, division, print_function
try:
import h5py # if we import h5py after tables we segfault
except ImportError:
pass
from pandas import DataFrame
from odo import odo, convert, append, resource, drop
from odo.backends.csv import CSV
from odo.backends.json import JSON, JSONLines
from multipledispatch import halt_ordering, restart_ordering
halt_ordering() # Turn off multipledispatch ordering
from datashape import dshape, discover
from .utils import ignoring
from .expr import (Symbol, TableSymbol, symbol, ndim, shape)
from .expr import (by, count, count_values, distinct, head, join, label, like,
mean, merge, nunique, relabel, selection, sort, summary, var, transform)
from .expr import (date, datetime, day, hour, microsecond, millisecond, month,
second, time, year)
from .expr.arrays import (tensordot, transpose)
from .expr.functions import *
from .index import create_index
from .interactive import *
from .compute.pmap import set_default_pmap
from .compute.csv import *
from .compute.json import *
from .compute.python import *
from .compute.pandas import *
from .compute.numpy import *
from .compute.core import *
from .compute.core import compute
from .cached import CachedDataset
with ignoring(ImportError):
from .server import *
with ignoring(ImportError):
from .sql import *
from .compute.sql import *
with ignoring(ImportError, AttributeError):
from .compute.spark import *
with ignoring(ImportError, TypeError):
from .compute.sparksql import *
with ignoring(ImportError):
from dynd import nd
from .compute.dynd import *
with ignoring(ImportError):
from .compute.h5py import *
with ignoring(ImportError):
from .compute.hdfstore import *
with ignoring(ImportError):
from .compute.pytables import *
with ignoring(ImportError):
from .compute.chunks import *
with ignoring(ImportError):
from .compute.bcolz import *
with ignoring(ImportError):
from .mongo import *
from .compute.mongo import *
with ignoring(ImportError):
from .pytables import *
from .compute.pytables import *
restart_ordering() # Restart multipledispatch ordering and do ordering
inf = float('inf')
nan = float('nan')
__version__ = '0.7.3'
# If IPython is already loaded, register the Blaze catalog magic
# from . import catalog
# import sys
# if 'IPython' in sys.modules:
# catalog.register_ipy_magic()
# del sys
def print_versions():
"""Print all the versions of software that Blaze relies on."""
import sys, platform
import numpy as np
import datashape
print("-=" * 38)
print("Blaze version: %s" % __version__)
print("Datashape version: %s" % datashape.__version__)
print("NumPy version: %s" % np.__version__)
print("Python version: %s" % sys.version)
(sysname, nodename, release, version, machine, processor) = \
platform.uname()
print("Platform: %s-%s-%s (%s)" % (sysname, release, machine, version))
if sysname == "Linux":
print("Linux dist: %s" % " ".join(platform.linux_distribution()[:-1]))
if not processor:
processor = "not recognized"
print("Processor: %s" % processor)
print("Byte-ordering: %s" % sys.byteorder)
print("-=" * 38)
def test(verbose=False, junitfile=None, exit=False):
"""
Runs the full Blaze test suite, outputting
the results of the tests to sys.stdout.
This uses py.test to discover which tests to
run, and runs tests in any 'tests' subdirectory
within the Blaze module.
Parameters
----------
verbose : int, optional
Value 0 prints very little, 1 prints a little bit,
and 2 prints the test names while testing.
junitfile : string, optional
If provided, writes the test results to an junit xml
style xml file. This is useful for running the tests
in a CI server such as Jenkins.
exit : bool, optional
If True, the function will call sys.exit with an
error code after the tests are finished.
"""
import os
import sys
import pytest
args = []
if verbose:
args.append('--verbose')
# Output an xunit file if requested
if junitfile is not None:
args.append('--junit-xml=%s' % junitfile)
# Add all 'tests' subdirectories to the options
rootdir = os.path.dirname(__file__)
for root, dirs, files in os.walk(rootdir):
if 'tests' in dirs:
testsdir = os.path.join(root, 'tests')
args.append(testsdir)
print('Test dir: %s' % testsdir[len(rootdir) + 1:])
# print versions (handy when reporting problems)
print_versions()
sys.stdout.flush()
# Ask pytest to do its thing
error_code = pytest.main(args=args)
if exit:
return sys.exit(error_code)
return error_code == 0
| {
"repo_name": "mrocklin/blaze",
"path": "blaze/__init__.py",
"copies": "1",
"size": "4835",
"license": "bsd-3-clause",
"hash": -1114532890222164500,
"line_mean": 30.3961038961,
"line_max": 80,
"alpha_frac": 0.6825232678,
"autogenerated": false,
"ratio": 3.8312202852614896,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9991145309189396,
"avg_score": 0.004519648774418599,
"num_lines": 154
} |
from __future__ import absolute_import, division, print_function
"""
Various utility functions used by Manhattan. It is not expected that these will
be used externally.
"""
import os
import random
import bisect
import hmac
import hashlib
import binascii
"""
A 1 pixel transparent GIF as a bytestring. For use as a tracking "beacon" in an
HTML document.
"""
transparent_pixel = (
'GIF89a'
'\x01\x00\x01\x00\x80\x00\x00\xff\xff\xff\x00\x00\x00!\xf9\x04'
'\x00\x00\x00\x00\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02'
'\x02D\x01\x00;')
def pixel_tag(path):
"""
Build the HTML surrounding a given image path that will be injected as a
tracking pixel.
:param path:
URL path to the tracking pixel.
:type path:
string
:returns:
HTML markup for image tag.
:rtype:
string
"""
return ('<img style="height:0;width:0;position:absolute;" '
'src="%s" alt="" />' % path)
def safe_to_hash(s):
return int(binascii.hexlify(s), 16)
def nonrandom_choice(seed, seq):
"""
Pick an element from the specified sequence ``seq`` based on the value of
the specified string ``seed``. Guaranteed to be deterministic, tries to be
uniform.
:param seed:
Any string specifier to control the element.
:type seed:
string
:param seq:
Python sequence to pick from.
:type seq:
sequence object
:returns:
An element from ``seq``.
"""
return random.Random(safe_to_hash(seed)).choice(seq)
def nonrandom(seed, n):
"""
Return a deterministic but pseudo-random number between 0 and ``n``, based
on the value of ``seed``. Guaranteed to be deterministic, tries to be
uniform.
:param seed:
Any string specifier to control the output.
:type seed:
string
:param n:
Range of output value.
:type n:
float
:returns:
Float between 0 and ``n``.
:rtype:
float
"""
return random.Random(safe_to_hash(seed)).random() * n
def choose_population(seed, populations=None):
"""
Randomly pick an element from populations according to type.
:param seed:
Any string specifier to control the output.
:type seed:
string
:param populations:
If not specified, perform a straight AB test between True and False. If
specified as a sequence, pick uniformly from the sequence. If specified
as a dict, use the keys as population names and the values as the
weight that each population should receive, and distribute between them
according to weight.
:type populations:
None, sequence, or dict
:returns:
Selected population
"""
# Uniform distribution between True, False
if populations is None:
return nonrandom_choice(seed, (True, False))
# Uniform distribution over populations
if isinstance(populations, list):
return nonrandom_choice(seed, populations)
# Weighted distribution over populations
if isinstance(populations, dict):
pop_name = []
pop_mass = []
running_mass = 0
# Sort population names before picking one, so that consistent results
# are returned even when Python has hash randomization enabled.
for name, mass in sorted(populations.items()):
if mass > 0:
pop_name.append(name)
pop_mass.append(running_mass)
running_mass += mass
if running_mass == 0:
raise ValueError("Need at least one option with probability > 0")
r = nonrandom(seed, running_mass)
i = bisect.bisect(pop_mass, r) - 1
return pop_name[i]
raise ValueError("Invalid population description")
def decode_http_header(raw):
"""
Decode a raw HTTP header into a unicode string. RFC 2616 specifies that
they should be latin1-encoded (a.k.a. iso-8859-1). If the passed-in value
is None, return an empty unicode string.
:param raw:
Raw HTTP header string.
:type raw:
string (non-unicode)
:returns:
Decoded HTTP header.
:rtype:
unicode string
"""
if raw:
return raw.decode('iso-8859-1', 'replace')
else:
return u''
def decode_url(raw):
"""
Decode a URL into a unicode string. Expected to be UTF-8.
:param raw:
Raw URL string.
:type raw:
string (non-unicode)
:returns:
Decoded URL.
:rtype:
unicode string
"""
return raw.decode('utf-8')
def constant_time_compare(a, b):
"Compare two strings with constant time. Used to prevent timing attacks."
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def nonce():
"""
Build a cryptographically random nonce.
:returns:
Hex string, with 20 bytes (40 hex chars).
:rtype:
string
"""
return binascii.hexlify(os.urandom(20))
class SignerError(Exception):
pass
class BadData(SignerError):
pass
class BadSignature(SignerError):
pass
class Signer(object):
def __init__(self, secret):
self.key = hashlib.sha1('manhattan.signer.' + secret).digest()
self.sep = '.'
def get_signature(self, value):
"Compute the signature for the given value."
mac = hmac.new(self.key, msg=value, digestmod=hashlib.sha1)
return binascii.hexlify(mac.digest())
def sign(self, value):
return "%s%s%s" % (value, self.sep, self.get_signature(value))
def unsign(self, signed_value):
if self.sep not in signed_value:
raise BadData('No separator %r found in cookie: %s' %
(self.sep, signed_value))
signed_value = signed_value.lower()
value, sig = signed_value.rsplit(self.sep, 1)
expected = self.get_signature(value)
if constant_time_compare(sig, expected):
return value
s = ('Signature for cookie %r does not match: expected %r, '
'got %s' % (signed_value, expected, sig))
raise BadSignature(s)
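# Hedged round-trip sketch (editor addition, assuming this module's
# Python 2 byte-string semantics); the secret and value are illustrative.
if __name__ == '__main__':
    _signer = Signer('not-a-real-secret')
    _token = _signer.sign('session-value')
    assert _signer.unsign(_token) == 'session-value'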
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/util.py",
"copies": "1",
"size": "6242",
"license": "mit",
"hash": -2663176791994192400,
"line_mean": 25.1171548117,
"line_max": 79,
"alpha_frac": 0.6163088754,
"autogenerated": false,
"ratio": 3.884256378344742,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5000565253744742,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# WCS utilities taken from spectral_cube project
import numpy as np
__all__ = ['reindex_wcs', 'drop_axis']
def reindex_wcs(wcs, inds):
"""
Re-index a WCS given indices. The number of axes may be reduced.
Parameters
----------
wcs: astropy.wcs.WCS
The WCS to be manipulated
inds: np.array(dtype='int')
The indices of the array to keep in the output.
e.g. swapaxes: [0,2,1,3]
dropaxes: [0,1,3]
"""
from astropy.wcs import WCS
wcs_parameters_to_preserve = ['cel_offset', 'dateavg', 'dateobs', 'equinox',
'latpole', 'lonpole', 'mjdavg', 'mjdobs', 'name',
'obsgeo', 'phi0', 'radesys', 'restfrq',
'restwav', 'specsys', 'ssysobs', 'ssyssrc',
'theta0', 'velangl', 'velosys', 'zsource']
if not isinstance(inds, np.ndarray):
raise TypeError("Indices must be an ndarray")
if inds.dtype.kind != 'i':
raise TypeError('Indices must be integers')
outwcs = WCS(naxis=len(inds))
for par in wcs_parameters_to_preserve:
setattr(outwcs.wcs, par, getattr(wcs.wcs, par))
cdelt = wcs.wcs.get_cdelt()
pc = wcs.wcs.get_pc()
outwcs.wcs.crpix = wcs.wcs.crpix[inds]
outwcs.wcs.cdelt = cdelt[inds]
outwcs.wcs.crval = wcs.wcs.crval[inds]
outwcs.wcs.cunit = [wcs.wcs.cunit[i] for i in inds]
outwcs.wcs.ctype = [wcs.wcs.ctype[i] for i in inds]
outwcs.wcs.cname = [wcs.wcs.cname[i] for i in inds]
outwcs.wcs.pc = pc[inds[:, None], inds[None, :]]
return outwcs
def drop_axis(wcs, dropax):
"""
Remove the axis at index dropax from the WCS
Parameters
----------
wcs: astropy.wcs.WCS
The WCS with naxis to be chopped to naxis-1
dropax: int
The index of the WCS to drop, counting from 0 (i.e., python convention,
not FITS convention)
"""
inds = list(range(wcs.wcs.naxis))
inds.pop(dropax)
inds = np.array(inds)
return reindex_wcs(wcs, inds)
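# Hedged usage sketch (editor addition; assumes astropy is installed):
#
# from astropy.wcs import WCS
# w = WCS(naxis=3)
# assert drop_axis(w, 2).wcs.naxis == 2          # drop the last axis
# swapped = reindex_wcs(w, np.array([1, 0, 2]))  # swap the first two axes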
| {
"repo_name": "JudoWill/glue",
"path": "glue/utils/wcs.py",
"copies": "1",
"size": "2131",
"license": "bsd-3-clause",
"hash": 1677233635745779200,
"line_mean": 29.884057971,
"line_max": 83,
"alpha_frac": 0.5800093853,
"autogenerated": false,
"ratio": 3.129221732745962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9207473839718635,
"avg_score": 0.0003514556654653273,
"num_lines": 69
} |
from __future__ import absolute_import, division, print_function
# rework as biquads with zeros/poles
import itertools
import json
import numpy as np
import scipy.signal as sig
FS = 44100
filtergroups = {
'pots': {
'signal': [
{'func': sig.butter, 'N': 2, 'fn': 200, 'btype': 'high'},
{'func': sig.butter, 'N': 2, 'fn': 300, 'btype': 'high'},
{'func': sig.cheby2, 'N': 8, 'fn': 3900, 'rs': 50, 'btype': 'low'},
{'func': sig.butter, 'N': 4, 'fn': 5000, 'btype': 'low'},
],
'noiseband': [
{'func': sig.butter, 'N': 4, 'fn': 4000, 'btype': 'low'},
],
}
}
def grouper(n, iterable):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
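# Hedged usage sketch (editor addition): grouper chunks an iterable into
# n-tuples, with a shorter final tuple when the length is not divisible.
if __name__ == '__main__':
    assert list(grouper(2, [1, 2, 3])) == [(1, 2), (3,)]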
def generate_filter_coeffs(filters, fs, output='ba'):
for filt in filters:
kwa = filt.copy()
fgen = kwa.pop('func')
kwa['Wn'] = 2 * kwa.pop('fn') / fs
kwa['output'] = output
yield fgen(**kwa)
def make_biquads(filterset, fs=FS):
ZPK = list(generate_filter_coeffs(filterset, fs, output='zpk'))
biquads = []
singles = []
# pair off zeros/poles from each filter set and convert to transfer func
for Z, P, K in ZPK:
zeros = sorted(Z, key=lambda x: -abs(x.imag))
poles = sorted(P, key=lambda x: -abs(x.imag))
for zz, pp in zip(grouper(2, zeros), grouper(2, poles)):
ba = sig.zpk2tf(zz, pp, K)
if len(zz) == 2:
biquads.append(ba)
else:
singles.append(ba)
# convolve the spare singles together to make biquads
for BA in grouper(2, singles):
(b1, a1), (b2, a2) = BA
b = sig.convolve(b1, b2)
a = sig.convolve(a1, a2)
biquads.append((b, a))
return np.array(biquads)
def normalize(biquads, fnorm, fs=FS):
# normalize to frequency
tsamp = np.arange(fs) / fs
test = np.cos(2 * np.pi * fnorm * tsamp)
for b, a in biquads:
d = sig.lfilter(b, a, test)
gain = (max(d[fs//2:]) - min(d[fs//2:])) / 2
#print(gain)
b /= gain
def main():
bqsets = {}
for name, filtersets in filtergroups.items():  # iterate (name, dict) pairs, not just keys
for fls in filtersets:
biquads = make_biquads(filtersets[fls])
normalize(biquads, 1500)
bqsets[fls] = biquads.tolist()
with open(name + '.json', 'w') as f:
json.dump(bqsets, f, indent=2)
if __name__ == '__main__':
main()
| {
"repo_name": "nicktimko/pots-sim",
"path": "potsim/coeffs/coeff_gen.py",
"copies": "1",
"size": "2552",
"license": "mit",
"hash": 5843845755789976000,
"line_mean": 27.043956044,
"line_max": 79,
"alpha_frac": 0.5286050157,
"autogenerated": false,
"ratio": 3.023696682464455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4052301698164455,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
# Standard
from collections import OrderedDict as odict
import multiprocessing
import ctypes as C
# Scientific
import utool as ut
import numpy as np
import time
from os.path import join
from pybing.pybing_helpers import (_load_c_shared_library, _cast_list_to_c, _extract_np_array)
VERBOSE_BING = ut.get_argflag('--verbbing') or ut.VERBOSE
QUIET_BING = ut.get_argflag('--quietbing') or ut.QUIET
VOC2007_MODEL_URL = 'https://lev.cs.rpi.edu/public/models/bing.zip'
#============================
# CTypes Interface Data Types
#============================
'''
Bindings for C Variable Types
'''
NP_FLAGS = 'aligned, c_contiguous, writeable'
# Primitives
C_OBJ = C.c_void_p
C_BYTE = C.c_char
C_CHAR = C.c_char_p
C_INT = C.c_int
C_DOUBLE = C.c_double
C_BOOL = C.c_bool
C_FLOAT = C.c_float
NP_INT8 = np.uint8
NP_FLOAT32 = np.float32
# Arrays
C_ARRAY_CHAR = C.POINTER(C_CHAR)
C_ARRAY_FLOAT = C.POINTER(C_FLOAT)
NP_ARRAY_INT = np.ctypeslib.ndpointer(dtype=C_INT, ndim=1, flags=NP_FLAGS)
NP_ARRAY_FLOAT = np.ctypeslib.ndpointer(dtype=NP_FLOAT32, ndim=2, flags=NP_FLAGS)
RESULTS_ARRAY = np.ctypeslib.ndpointer(dtype=NP_ARRAY_FLOAT, ndim=1, flags=NP_FLAGS)
#=================================
# Method Parameter Types
#=================================
'''
IMPORTANT:
For functions that return void, use Python None as the return value.
For functions that take no parameters, use the Python empty list [].
'''
METHODS = {}
METHODS['init'] = ([
C_DOUBLE, # base
C_INT, # W
C_INT, # NSS
C_BOOL, # verbose
C_BOOL, # quiet
], C_OBJ)
METHODS['model'] = ([
C_OBJ, # detector
C_CHAR, # model_path
C_BOOL, # verbose
C_BOOL, # quiet
], C_OBJ)
METHODS['train2'] = ([
C_OBJ, # detector
C_BOOL, # verbose
C_BOOL, # quiet
], None)
METHODS['detect'] = ([
C_OBJ, # detector
C_ARRAY_CHAR, # input_gpath_array
C_INT, # _input_gpath_num
C_INT, # numPerSz
RESULTS_ARRAY, # results_val_array
NP_ARRAY_INT, # results_len_array
C_INT, # RESULT_LENGTH
C_BOOL, # serial
C_BOOL, # verbose
C_BOOL, # quiet
], None)
RESULT_LENGTH = 4
#=================================
# Load Dynamic Library
#=================================
BING_CLIB = _load_c_shared_library(METHODS)
#=================================
# BING Detector
#=================================
class BING_Detector(object):
def __init__(bing, default=True, verbose=VERBOSE_BING, quiet=QUIET_BING, **kwargs):
'''
Create the C object for the PyBING detector.
Args:
verbose (bool, optional): verbose flag; defaults to --verbbing flag
Kwargs:
base (int)
W (int)
NNS (int)
Returns:
detector (object): the BING Detector object
'''
bing.verbose = verbose
bing.quiet = quiet
# Default values
params = odict([
('base', 2.0),
('W', 8),
('NNS', 2),
('verbose', verbose),
('quiet', quiet),
])
params.update(kwargs)
params_list = list(params.values())
if bing.verbose and not bing.quiet:
""" debug with dmesg | tail -n 200 """
print('[pybing.py] Start Create New BING Object')
ut.print_dict(params)
print('[pybing.py] params_list = %r' % (params_list,))
print('[pybing.py] type of params = %r' % (list(map(type, params_list)),))
pass
bing.detector_c_obj = BING_CLIB.init(*params_list)
if bing.verbose and not bing.quiet:
print('[pybing.py] Finished Create New BING Object')
if default:
model_path = ut.grab_zipped_url(VOC2007_MODEL_URL, appname='pybing')
model_path = join(model_path, 'model')
print('Loading models: %r' % (model_path, ))
bing.model(model_path)
def model(bing, model_path, **kwargs):
'''
Load the model.
Args:
model_path (str): model path
serial (bool, optional): flag to signify if to load the model in serial;
defaults to False
verbose (bool, optional): verbose flag; defaults to object's verbose or
selectively enabled for this function
Returns:
None
'''
# Default values
params = odict([
('verbose', bing.verbose),
('quiet', bing.quiet),
])
params.update(kwargs)
# Data integrity
assert len(model_path) > 0, \
'Must specify at least one model path to load'
params_list = [
model_path,
] + list(params.values())  # explicit list for Python 3 compatibility
BING_CLIB.model(bing.detector_c_obj, *params_list)
def train(bing, **kwargs):
'''
NOT IMPLEMENTED
Returns:
None
'''
raise NotImplementedError()
def detect(bing, input_gpath_list, **kwargs):
'''
Run detection with a given loaded model on a list of images
Args:
input_gpath_list (list of str): the list of image paths that you want
to test
Kwargs:
numPerSz (int): the number of results per size
'''
# Default values
params = odict([
('numPerSz', 130),
('batch_size', None),
('results_val_array', None), # This value always gets overwritten
('results_len_array', None), # This value always gets overwritten
('RESULT_LENGTH', None), # This value always gets overwritten
('serial', False),
('verbose', bing.verbose),
('quiet', bing.quiet),
])
params.update(kwargs)
params['RESULT_LENGTH'] = RESULT_LENGTH
# Try to determine the parallel processing batch size
if params['batch_size'] is None:
try:
cpu_count = multiprocessing.cpu_count()
if not params['quiet']:
print('[pybing py] Detecting with %d CPUs' % (cpu_count, ))
params['batch_size'] = cpu_count
except Exception:
params['batch_size'] = 8
# Run training algorithm
batch_size = params['batch_size']
del params['batch_size'] # Remove this value from params
batch_num = int(len(input_gpath_list) / batch_size) + 1
# Detect for each batch
for batch in ut.ProgressIter(range(batch_num), lbl="[pybing py]", freq=1, invert_rate=True):
begin = time.time()
start = batch * batch_size
end = start + batch_size
if end > len(input_gpath_list):
end = len(input_gpath_list)
input_gpath_list_ = input_gpath_list[start:end]
num_images = len(input_gpath_list_)
# Set image detection to be run in serial if less than half a batch to run
if num_images < min(batch_size / 2, 8):
params['serial'] = True
# Final sanity check
params['results_val_array'] = np.empty(num_images, dtype=NP_ARRAY_FLOAT)
params['results_len_array'] = np.empty(num_images, dtype=C_INT)
# Make the params_list
params_list = [
_cast_list_to_c(input_gpath_list_, C_CHAR),
num_images,
] + list(params.values())  # explicit list for Python 3 compatibility
BING_CLIB.detect(bing.detector_c_obj, *params_list)
results_list = _extract_np_array(params['results_len_array'], params['results_val_array'], NP_ARRAY_FLOAT, NP_FLOAT32, RESULT_LENGTH)
conclude = time.time()
if not params['quiet']:
print('[pybing py] Took %r seconds to compute %d images' % (conclude - begin, num_images, ))
for input_gpath, result_list in zip(input_gpath_list_, results_list):
result_list_ = []
for result in result_list:
# Unpack result into a nice Python dictionary and return
temp = {}
temp['minx'] = int(result[0])
temp['miny'] = int(result[1])
temp['maxx'] = int(result[2])
temp['maxy'] = int(result[3])
result_list_.append(temp)
yield (input_gpath, result_list_)
params['results_val_array'] = None
params['results_len_array'] = None
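# Hedged usage sketch (editor addition; requires the compiled BING shared
# library and the downloaded VOC2007 model; the image path is illustrative):
#
# bing = BING_Detector()
# for gpath, boxes in bing.detect(['/path/to/image.png']):
#     print(gpath, boxes[:1])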
# Pickle functions
def dump(bing, file):
'''
UNIMPLEMENTED
Args:
file (object)
Returns:
None
'''
pass
def dumps(bing):
'''
UNIMPLEMENTED
Returns:
string
'''
pass
def load(bing, file):
'''
UNIMPLEMENTED
Args:
file (object)
Returns:
detector (object)
'''
pass
def loads(bing, string):
'''
UNIMPLEMENTED
Args:
string (str)
Returns:
detector (object)
'''
pass
| {
"repo_name": "bluemellophone/pybing",
"path": "pybing/_pybing.py",
"copies": "1",
"size": "9858",
"license": "bsd-3-clause",
"hash": 8727042700767793000,
"line_mean": 30.8,
"line_max": 145,
"alpha_frac": 0.4939135727,
"autogenerated": false,
"ratio": 4.002436053593179,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4996349626293179,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
try:
from collections.abc import Hashable
except ImportError:
from collections import Hashable
from types import GeneratorType
from ._vendor.six import wraps
# TODO: spend time filling out functionality and make these more robust
def memoize(func):
"""
Decorator to cause a function to cache its results for each combination of
inputs and return the cached result on subsequent calls. Does not support
named arguments or arg values that are not hashable.
>>> @memoize
... def foo(x):
... print('running function with', x)
... return x+3
...
>>> foo(10)
running function with 10
13
>>> foo(10)
13
>>> foo(11)
running function with 11
14
>>> @memoize
... def range_tuple(limit):
... print('running function')
... return tuple(i for i in range(limit))
...
>>> range_tuple(3)
running function
(0, 1, 2)
>>> range_tuple(3)
(0, 1, 2)
>>> @memoize
... def range_iter(limit):
... print('running function')
... return (i for i in range(limit))
...
>>> range_iter(3)
Traceback (most recent call last):
TypeError: Can't memoize a generator or non-hashable object!
"""
func._result_cache = {} # pylint: disable-msg=W0212
@wraps(func)
def _memoized_func(*args, **kwargs):
key = (args, tuple(sorted(kwargs.items())))
if key in func._result_cache: # pylint: disable-msg=W0212
return func._result_cache[key] # pylint: disable-msg=W0212
else:
result = func(*args, **kwargs)
if isinstance(result, GeneratorType) or not isinstance(result, Hashable):
raise TypeError("Can't memoize a generator or non-hashable object!")
func._result_cache[key] = result # pylint: disable-msg=W0212
return result
return _memoized_func
def memoizemethod(method):
"""
Decorator to cause a method to cache its results in self for each
combination of inputs and return the cached result on subsequent calls.
Does not support named arguments or arg values that are not hashable.
>>> class Foo (object):
... @memoizemethod
... def foo(self, x, y=0):
... print('running method with', x, y)
... return x + y + 3
...
>>> foo1 = Foo()
>>> foo2 = Foo()
>>> foo1.foo(10)
running method with 10 0
13
>>> foo1.foo(10)
13
>>> foo2.foo(11, y=7)
running method with 11 7
21
>>> foo2.foo(11)
running method with 11 0
14
>>> foo2.foo(11, y=7)
21
>>> class Foo (object):
... def __init__(self, lower):
... self.lower = lower
... @memoizemethod
... def range_tuple(self, upper):
... print('running function')
... return tuple(i for i in range(self.lower, upper))
... @memoizemethod
... def range_iter(self, upper):
... print('running function')
... return (i for i in range(self.lower, upper))
...
>>> foo = Foo(3)
>>> foo.range_tuple(6)
running function
(3, 4, 5)
>>> foo.range_tuple(7)
running function
(3, 4, 5, 6)
>>> foo.range_tuple(6)
(3, 4, 5)
>>> foo.range_iter(6)
Traceback (most recent call last):
TypeError: Can't memoize a generator or non-hashable object!
"""
@wraps(method)
def _wrapper(self, *args, **kwargs):
# NOTE: a __dict__ check is performed here rather than using the
# built-in hasattr function because hasattr will look up to an object's
# class if the attr is not directly found in the object's dict. That's
# bad for this if the class itself has a memoized classmethod for
# example that has been called before the memoized instance method,
# then the instance method will use the class's result cache, causing
# its results to be globally stored rather than on a per instance
# basis.
if '_memoized_results' not in self.__dict__:
self._memoized_results = {}
memoized_results = self._memoized_results
key = (method.__name__, args, tuple(sorted(kwargs.items())))
if key in memoized_results:
return memoized_results[key]
else:
try:
result = method(self, *args, **kwargs)
except KeyError as e:
if '__wrapped__' in str(e):
result = None # is this the right thing to do? happened during py3 conversion
else:
raise
if isinstance(result, GeneratorType) or not isinstance(result, Hashable):
raise TypeError("Can't memoize a generator or non-hashable object!")
return memoized_results.setdefault(key, result)
return _wrapper
# class memoizemethod(object):
# """cache the return value of a method
#
# This class is meant to be used as a decorator of methods. The return value
# from a given method invocation will be cached on the instance whose method
# was invoked. All arguments passed to a method decorated with memoize must
# be hashable.
#
# If a memoized method is invoked directly on its class the result will not
# be cached. Instead the method will be invoked like a static method:
# class Obj(object):
# @memoize
# def add_to(self, arg):
# return self + arg
# Obj.add_to(1) # not enough arguments
# Obj.add_to(1, 2) # returns 3, result is not cached
# """
# def __init__(self, func):
# self.func = func
# def __get__(self, obj, objtype=None):
# if obj is None:
# return self.func
# return partial(self, obj)
# def __call__(self, *args, **kw):
# obj = args[0]
# try:
# cache = obj.__cache
# except AttributeError:
# cache = obj.__cache = {}
# key = (self.func, args[1:], frozenset(kw.items()))
# try:
# res = cache[key]
# except KeyError:
# res = cache[key] = self.func(*args, **kw)
# return res
def clear_memoized_methods(obj, *method_names):
"""
Clear the memoized method or @memoizedproperty results for the given
method names from the given object.
>>> v = [0]
>>> def inc():
... v[0] += 1
... return v[0]
...
>>> class Foo(object):
... @memoizemethod
... def foo(self):
... return inc()
... @memoizedproperty
... def g(self):
... return inc()
...
>>> f = Foo()
>>> f.foo(), f.foo()
(1, 1)
>>> clear_memoized_methods(f, 'foo')
>>> (f.foo(), f.foo(), f.g, f.g)
(2, 2, 3, 3)
>>> (f.foo(), f.foo(), f.g, f.g)
(2, 2, 3, 3)
>>> clear_memoized_methods(f, 'g', 'no_problem_if_undefined')
>>> f.g, f.foo(), f.g
(4, 2, 4)
>>> f.foo()
2
"""
for key in list(getattr(obj, '_memoized_results', {}).keys()):
# key[0] is the method name
if key[0] in method_names:
del obj._memoized_results[key]
property_dict = obj._cache_
for prop in method_names:
inner_attname = '__%s' % prop
if inner_attname in property_dict:
del property_dict[inner_attname]
def memoizedproperty(func):
"""
Decorator to cause a method to cache its results in self for each
combination of inputs and return the cached result on subsequent calls.
Does not support named arguments or arg values that are not hashable.
>>> class Foo (object):
... _x = 1
... @memoizedproperty
... def foo(self):
... self._x += 1
... print('updating and returning {0}'.format(self._x))
... return self._x
...
>>> foo1 = Foo()
>>> foo2 = Foo()
>>> foo1.foo
updating and returning 2
2
>>> foo1.foo
2
>>> foo2.foo
updating and returning 2
2
>>> foo1.foo
2
"""
inner_attname = '__%s' % func.__name__
def new_fget(self):
if not hasattr(self, '_cache_'):
self._cache_ = dict()
cache = self._cache_
if inner_attname not in cache:
cache[inner_attname] = func(self)
return cache[inner_attname]
return property(new_fget)
# def memoized_property(fget):
# """
# Return a property attribute for new-style classes that only calls its getter on the first
# access. The result is stored and on subsequent accesses is returned, preventing the need to
# call the getter any more.
# Example::
# >>> class C(object):
# ... load_name_count = 0
# ... @memoized_property
# ... def name(self):
# ... "name's docstring"
# ... self.load_name_count += 1
# ... return "the name"
# >>> c = C()
# >>> c.load_name_count
# 0
# >>> c.name
# "the name"
# >>> c.load_name_count
# 1
# >>> c.name
# "the name"
# >>> c.load_name_count
# 1
# """
# attr_name = '_{0}'.format(fget.__name__)
#
# @wraps(fget)
# def fget_memoized(self):
# if not hasattr(self, attr_name):
# setattr(self, attr_name, fget(self))
# return getattr(self, attr_name)
#
# return property(fget_memoized)
class classproperty(object): # pylint: disable=C0103
# from celery.five
def __init__(self, getter=None, setter=None):
if getter is not None and not isinstance(getter, classmethod):
getter = classmethod(getter)
if setter is not None and not isinstance(setter, classmethod):
setter = classmethod(setter)
self.__get = getter
self.__set = setter
info = getter.__get__(object) # just need the info attrs.
self.__doc__ = info.__doc__
self.__name__ = info.__name__
self.__module__ = info.__module__
def __get__(self, obj, type_=None):
if obj and type_ is None:
type_ = obj.__class__
return self.__get.__get__(obj, type_)()
def __set__(self, obj, value):
if obj is None:
return self
return self.__set.__get__(obj)(value)
def setter(self, setter):
return self.__class__(self.__get, setter)
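# Hedged usage sketch (editor addition), in the spirit of the commented
# examples above; the class and attribute are illustrative:
#
# class Config(object):
#     _debug = False
#
#     @classproperty
#     def debug(cls):
#         return cls._debug
#
# Config.debug  # -> False, accessible on the class itself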
# memoize & clear:
# class method
# function
# classproperty
# property
# staticproperty?
# memoizefunction
# memoizemethod
# memoizeproperty
#
#
| {
"repo_name": "int19h/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/_vendor/auxlib/decorators.py",
"copies": "7",
"size": "10577",
"license": "apache-2.0",
"hash": -3819140168408250400,
"line_mean": 29.3936781609,
"line_max": 99,
"alpha_frac": 0.5568686773,
"autogenerated": false,
"ratio": 3.741422002122391,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.779829067942239,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
try:
from guitool_ibeis import api_tree_node_cython
if 'api_tree_node_cython' not in globals():
raise ImportError('')
print('[test_treenode] cython ON')
except ImportError:
print('[test_treenode] cython OFF')
from guitool_ibeis import api_tree_node
def test_build_internal_structure():
"""
CommandLine:
python -m guitool_ibeis.tests.test_treenode --test-test_build_internal_structure
Example:
>>> # DISABLE_DOCTEST
>>> from guitool_ibeis.tests.test_treenode import * # NOQA
>>> test_build_internal_structure()
"""
if 'api_tree_node_cython' in globals():
print('test cython ON')
_test_build_internal_structure(api_tree_node_cython, 'cython')
else:
print('test cython OFF')
_test_build_internal_structure(api_tree_node, 'python')
print('finished all tests')
def _test_build_internal_structure(_module, lang):
import utool as ut
# Test data
N = 6
#N = 2000
def ider_level0():
return range(N)
def ider_level1(input_):
_single = lambda x: [y for y in range(x ** 2, x ** 2 + max(0, ((N // 1) - x - 1)))]
if isinstance(input_, list):
return [_single(x) for x in input_]
else:
x = input_
return _single(x)
# Build Structure
ider_list = [ider_level0, ider_level1]
num_levels = len(ider_list)
# TEST RECURSIVE
print('================')
with ut.Timer(lang + ' recursive:'):
if num_levels == 0:
root_id_list = []
else:
root_id_list = ider_list[0]()
root_node1 = _module.TreeNode(-1, None, -1)
level = 0
_module._populate_tree_recursive(
root_node1, root_id_list, num_levels, ider_list, level)
if N < 10:
print('')
print(api_tree_node.tree_node_string(root_node1, indent=' * '))
print('================')
#with ut.Timer(lang + ' iterative:'):
# # TEST ITERATIVE
# # TODO: Vet this code a bit more.
# root_node2 = _module.TreeNode(-1, None, -1)
# _module._populate_tree_iterative(
# root_node2, num_levels, ider_list)
#if N < 10:
# print('')
# print(api_tree_node.tree_node_string(root_node2, indent=' * '))
print('================')
print('finished %s test' % lang)
if __name__ == '__main__':
"""
CommandLine:
python -m guitool_ibeis.tests.test_treenode
python -m guitool_ibeis.tests.test_treenode --allexamples
python -m guitool_ibeis.tests.test_treenode --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
| {
"repo_name": "Erotemic/guitool",
"path": "guitool_ibeis/tests/test_treenode.py",
"copies": "1",
"size": "2822",
"license": "apache-2.0",
"hash": 3716520215212929500,
"line_mean": 30.010989011,
"line_max": 91,
"alpha_frac": 0.5747696669,
"autogenerated": false,
"ratio": 3.3396449704142013,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4414414637314201,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
#from src.backtrader_indicator_decisions import Decision
from src.stock import Stock, str2date
import datetime # For datetime objects
import os.path # To manage paths
import sys # To find out the script name (in argv[0])
# Import the backtrader platform
import backtrader as bt
# Create a Stratey
class TestStrategy(bt.Strategy):
params = (
('maperiod', 15),
('stake', 10),
)
def log(self, txt, dt=None):
''' Logging function for this strategy'''
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def __init__(self):
# Keep a reference to the "close" line in the data[0] dataseries
self.dataclose = self.datas[0].close
# Set the sizer stake from the params
self.sizer.setsizing(self.params.stake)
# To keep track of pending orders and buy price/commission
self.order = None
self.buyprice = None
self.buycomm = None
# Add a MovingAverageSimple indicator
self.sma = bt.indicators.SimpleMovingAverage(
self.datas[0], period=self.params.maperiod)
self.sma5 = bt.indicators.SimpleMovingAverage(
self.datas[0], period=5)
self.sma10 = bt.indicators.SimpleMovingAverage(
self.datas[0], period=10)
self.sma15 = bt.indicators.SimpleMovingAverage(
self.datas[0], period=15)
# Indicators for the plotting show
bt.indicators.ExponentialMovingAverage(self.datas[0], period=25)
bt.indicators.WeightedMovingAverage(self.datas[0], period=25,
subplot=True)
bt.indicators.StochasticSlow(self.datas[0])
self.macd = bt.indicators.MACDHisto(self.datas[0])
rsi = bt.indicators.RSI(self.datas[0])
bt.indicators.SmoothedMovingAverage(rsi, period=10)
bt.indicators.ATR(self.datas[0], plot=False)
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed, order.Canceled, order.Margin]:
if order.isbuy():
self.log(
'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
(order.executed.price,
order.executed.value,
order.executed.comm))
self.bar_executed = len(self)
# Write down: no pending order
self.order = None
def notify_trade(self, trade):
if not trade.isclosed:
return
self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' %
(trade.pnl, trade.pnlcomm))
def next(self):
# Simply log the closing price of the series from the reference
self.log('Close, %.2f' % self.dataclose[0])
# Check if an order is pending ... if yes, we cannot send a 2nd one
if self.order:
return
# Check if we are in the market
if not self.position:
# Not yet ... we MIGHT BUY if ...
if self.macd.macd > self.macd.signal:
# BUY, BUY, BUY!!! (with all possible default parameters)
self.log('BUY CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.buy(size=100)
else:
if self.macd.macd > self.macd.signal and self.macd.macd[0] < self.macd.macd[-1]:
# SELL, SELL, SELL!!! (with all possible default parameters)
self.log('SELL CREATE, %.2f' % self.dataclose[0])
# Keep track of the created order to avoid a 2nd order
self.order = self.sell(size=100)
def get_datapath(start, end ):
try:
s = Stock(sys.argv[1])
except Exception:
print('Need to provide Stock symbol as an argument')
sys.exit(1)
return s.range2csv(start, end)
def get_start_end():
if len(sys.argv) >= 3:
start = str2date(sys.argv[2])
print('using user start:', start)
else:
start = datetime.datetime(2014, 1 , 24)
print('using default start:', start)
if len(sys.argv) >= 4:
end = str2date(sys.argv[3])
print('using user end:', end)
else:
end = datetime.datetime.now()
print('using default end:', end)
return start, end
if __name__ == '__main__':
# Create a cerebro entity
cerebro = bt.Cerebro()
# print 1
# Add a strategy
cerebro.addstrategy(TestStrategy)
#print 2
# Datas are in a subfolder of the samples. Need to find where the script is
# because it could have been called from anywhere
#modpath = os.path.dirname(os.path.abspath(sys.argv[0]))
start, end = get_start_end()
datapath = get_datapath(start, end)
# Create a Data Feed
data = bt.feeds.YahooFinanceCSVData(
dataname=datapath,
# Do not pass values before this date
fromdate=start,
# Do not pass values after this date
todate=end,
reverse=False)
#print 3
# Add the Data Feed to Cerebro
cerebro.adddata(data)
#print 4
# Set our desired cash start
cerebro.broker.setcash(10000.0)
# Set the commission
cerebro.broker.setcommission(commission=0.05)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Plot the result
cerebro.plot()
| {
"repo_name": "tbkraf08/trader",
"path": "backtrader_main.py",
"copies": "1",
"size": "5401",
"license": "mit",
"hash": -6485780436352620000,
"line_mean": 26.8453608247,
"line_max": 83,
"alpha_frac": 0.6902425477,
"autogenerated": false,
"ratio": 2.938520130576714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4128762678276714,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__author__ = 'arkilic'
import os
import yaml
import logging
logger = logging.getLogger(__name__)
def load_configuration(name, prefix, fields):
"""
Load configuration data from a cascading series of locations.
The precedence order is (highest priority last):
1. CONDA_ENV/etc/{name}.yml (if the CONDA_ETC_ env var is defined)
2. /etc/{name}.yml
3. ~/.config/{name}/connection.yml
4. reading {PREFIX}_{FIELD} environmental variables
Parameters
----------
name : str
The expected base-name of the configuration files
prefix : str
The prefix when looking for environmental variables
fields : iterable of strings
The required configuration fields
Returns
------
conf : dict
Dictionary keyed on ``fields`` with the values extracted
"""
filenames = [os.path.join('/etc', name + '.yml'),
os.path.join(os.path.expanduser('~'), '.config',
name, 'connection.yml'),
]
if 'CONDA_ETC_' in os.environ:
filenames.insert(0, os.path.join(os.environ['CONDA_ETC_'],
name + '.yml'))
config = {}
for filename in filenames:
if os.path.isfile(filename):
with open(filename) as f:
config.update(yaml.safe_load(f))  # safe_load avoids executing arbitrary YAML tags
logger.debug("Using db connection specified in config file. \n%r",
config)
for field in fields:
var_name = prefix + '_' + field.upper().replace(' ', '_')
config[field] = os.environ.get(var_name, config.get(field, None))
missing = [k for k, v in config.items() if v is None]
if missing:
raise KeyError("The configuration field(s) {0} were not found in any "
"file or environmental variable.".format(missing))
return config
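# Hedged usage sketch (editor addition): an environment variable named
# {PREFIX}_{FIELD} overrides any file-provided value; 'MDS_HOST' below is
# derived from prefix 'MDS' and field 'host'.
#
# os.environ['MDS_HOST'] = 'localhost'
# conf = load_configuration('metadatastore', 'MDS', ['host'])
# assert conf['host'] == 'localhost'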
# connection_config = load_configuration('metadatastore', 'MDS',
# ['host', 'server', 'port', 'timezone'])
connection_config = {'host': '127.0.0.1', 'port': 7779, 'protocol': 'http'}
| {
"repo_name": "mrkraimer/metadataservice",
"path": "metadataservice/client/conf.py",
"copies": "1",
"size": "2204",
"license": "bsd-3-clause",
"hash": 8706665766088192000,
"line_mean": 31.8955223881,
"line_max": 80,
"alpha_frac": 0.5730490018,
"autogenerated": false,
"ratio": 4.190114068441065,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016459675518500385,
"num_lines": 67
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .aes import aes
from . import colors, shapes, size, linetypes, alphas
def assign_visual_mapping(data, aes, gg):
"""Assigns the visual mapping to the given data and adds the right legend
Parameters
----------
data : DataFrame
dataframe which should have aesthetic mappings assigned to
aes : aesthetic
mapping, visual value to variable
gg : ggplot object,
It holds global configuration values needed by
some of the mapping functions
Returns
-------
data : DataFrame
the changed dataframe with visual values added
legend : dict
A legend as specified in `components.legend`
"""
legend = {}
data, legend['color'] = colors.assign_colors(data, aes, gg, 'color')
data, legend['fill'] = colors.assign_colors(data, aes, gg, 'fill')
data, legend['size'] = size.assign_sizes(data, aes)
data, legend['linetype'] = linetypes.assign_linetypes(data, aes)
data, legend['shape'] = shapes.assign_shapes(data, aes)
data, legend['alpha'] = alphas.assign_alphas(data, aes)
# Delete empty entries in the legend
for _aes_name in ('color', 'fill', 'size', 'linetype', 'shape', 'alpha'):
if not legend[_aes_name]:
del legend[_aes_name]
return data, legend
| {
"repo_name": "ricket1978/ggplot",
"path": "ggplot/components/__init__.py",
"copies": "12",
"size": "1402",
"license": "bsd-2-clause",
"hash": -4193330693419276000,
"line_mean": 34.05,
"line_max": 77,
"alpha_frac": 0.6390870185,
"autogenerated": false,
"ratio": 3.938202247191011,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from bson import ObjectId
from bson.errors import InvalidId
import six
from doct import Document
from jsonschema import validate as js_validate
import uuid
import time as ttime
import pymongo
class DatumNotFound(Exception):
pass
DuplicateKeyError = pymongo.errors.DuplicateKeyError
def doc_or_uid_to_uid(doc_or_uid):
"""Given Document or uid return the uid
Parameters
----------
doc_or_uid : dict or str
        If str, assume it is a uid and pass it through; otherwise return
        the 'uid' field
Returns
-------
uid : str
A string version of the uid of the given document
"""
if not isinstance(doc_or_uid, six.string_types):
try:
doc_or_uid = doc_or_uid['uid']
except TypeError:
pass
return doc_or_uid
def _get_datum_from_eid(col, eid, datum_cache, logger):
try:
datum = datum_cache[eid]
except KeyError:
keys = ['datum_kwargs', 'resource']
# find the current document
edoc = col.find_one({'datum_id': eid})
if edoc is None:
raise DatumNotFound(
"No datum found with datum_id {!r}".format(eid))
# save it for later
datum = {k: edoc[k] for k in keys}
res = edoc['resource']
count = 0
for dd in col.find({'resource': res}):
count += 1
d_id = dd['datum_id']
if d_id not in datum_cache:
datum_cache[d_id] = {k: dd[k] for k in keys}
if count > datum_cache.max_size:
            logger.warning("More datums in the resource than the "
                           "datum cache can hold.")
return datum
def retrieve(col, eid, datum_cache, get_spec_handler, logger):
datum = _get_datum_from_eid(col, eid, datum_cache, logger)
handler = get_spec_handler(datum['resource'])
return handler(**datum['datum_kwargs'])
def resource_given_eid(col, eid, datum_cache, logger):
datum = _get_datum_from_eid(col, eid, datum_cache, logger)
return datum['resource']
def resource_given_uid(col, resource):
uid = doc_or_uid_to_uid(resource)
try:
uid = ObjectId(uid)
except InvalidId:
ret = col.find_one({'uid': uid})
else:
ret = col.find_one({'_id': uid})
if ret is None:
raise RuntimeError('did not find resource {!r}'.format(resource))
oid = ret.pop('_id')
ret.setdefault('uid', oid)
ret['id'] = ret['uid']
return Document('resource', ret)
def bulk_insert_datum(col, resource, datum_ids,
datum_kwarg_list):
resource_id = doc_or_uid_to_uid(resource)
def datum_factory():
for d_id, d_kwargs in zip(datum_ids, datum_kwarg_list):
datum = dict(resource=resource_id,
datum_id=str(d_id),
datum_kwargs=dict(d_kwargs))
yield datum
bulk = col.initialize_unordered_bulk_op()
for dm in datum_factory():
bulk.insert(dm)
return bulk.execute()
def insert_datum(col, resource, datum_id, datum_kwargs, known_spec,
resource_col):
try:
resource['spec']
except (AttributeError, TypeError):
resource = resource_col.find_one({'uid': resource})
spec = resource['spec']
if spec in known_spec:
js_validate(datum_kwargs, known_spec[spec]['datum'])
datum = dict(resource=resource['uid'],
datum_id=str(datum_id),
datum_kwargs=dict(datum_kwargs))
col.insert_one(datum)
# do not leak mongo objectID
datum.pop('_id', None)
return Document('datum', datum)
def insert_resource(col, spec, resource_path, resource_kwargs,
known_spec, root):
resource_kwargs = dict(resource_kwargs)
if spec in known_spec:
js_validate(resource_kwargs, known_spec[spec]['resource'])
resource_object = dict(spec=str(spec),
resource_path=str(resource_path),
root=str(root),
resource_kwargs=resource_kwargs,
uid=str(uuid.uuid4()))
col.insert_one(resource_object)
# maintain back compatibility
resource_object['id'] = resource_object['uid']
resource_object.pop('_id')
return Document('resource', resource_object)
def update_resource(update_col, resource_col, old, new, cmd, cmd_kwargs):
'''Update a resource document
Parameters
----------
update_col : Collection
The collection to record audit trail in
resource_col : Collection
The resource collection
old : dict
The old resource document
new : dict
The new resource document
cmd : str
The name of the operation which generated this update
cmd_kwargs : dict
The arguments that went into the update (excluding the resource id)
Returns
-------
ret : Document
The new resource document
log_object : dict
The history object inserted (with oid removed)
'''
if old['uid'] != new['uid']:
raise RuntimeError('must not change the resource uid')
uid = old['uid']
log_object = {'resource': uid,
'old': old,
'new': new,
'time': ttime.time(),
'cmd': cmd,
'cmd_kwargs': cmd_kwargs}
update_col.insert_one(log_object)
result = resource_col.replace_one({'uid': uid}, new)
ret = resource_given_uid(resource_col, uid)
# TODO look inside of result
del result
log_object.pop('_id')
return ret, log_object
def get_resource_history(col, resource):
uid = doc_or_uid_to_uid(resource)
cursor = col.find({'resource': uid}).sort('time')
for doc in cursor:
for k in ['new', 'old']:
d = doc[k]
d.pop('_id', None)
d['id'] = d['uid']
doc[k] = Document('resource', d)
doc.pop('_id')
yield Document('update', doc)
def get_datum_by_res_gen(datum_col, resource_uid):
'''Given a resource uid, get all datums
No order is guaranteed.
Internally the result of this is passed to the `get_file_list` method
of the handler object in `change_root`
Parameters
----------
    datum_col : Collection
The Datum collection
resource_uid : Document or str
The resource to work on
Yields
------
datum : doct.Document
'''
resource_uid = doc_or_uid_to_uid(resource_uid)
cur = datum_col.find({'resource': resource_uid})
for d in cur:
yield Document('datum', d)
def get_file_list(resource, datum_kwarg_gen, get_spec_handler):
"""
Given a resource and an iterable of datum kwargs, get a list of
associated files.
DO NOT USE FOR COPYING OR MOVING. This is for debugging only.
See the methods for moving and copying on the FileStore object.
"""
handler = get_spec_handler(resource['uid'])
return handler.get_file_list(datum_kwarg_gen)
| {
"repo_name": "NSLS-II/filestore",
"path": "filestore/core.py",
"copies": "1",
"size": "7127",
"license": "bsd-3-clause",
"hash": -5547259772572776000,
"line_mean": 26.4115384615,
"line_max": 75,
"alpha_frac": 0.5880454609,
"autogenerated": false,
"ratio": 3.898796498905908,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4986841959805908,
"avg_score": null,
"num_lines": null
} |
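doc_or_uid_to_uid above normalizes the two accepted input shapes. A short usage sketch, assuming the module above is importable as filestore.core; the uid value is made up for the demonstration.
from filestore.core import doc_or_uid_to_uid

print(doc_or_uid_to_uid('abc123'))            # a str passes straight through
print(doc_or_uid_to_uid({'uid': 'abc123'}))   # a dict yields its 'uid' field
# both lines print: abc123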
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from builtins import str # noqa pylint: disable=redefined-builtin
import copy
import os
try:
from unittest.mock import sentinel, patch
except ImportError:
from mock import sentinel, patch
import pytest
from plpacker.config import Configuration, CliArgInjector
class TestConfigConstructor(object):
@patch.object(Configuration, '_load_file')
@patch.object(Configuration, '_find_config_file')
@patch.object(Configuration, '_merge')
def test_default_call_flow(self, merge_mock, find_mock, load_mock):
# pylint: disable=no-self-use
cli_args = {'cli_args': '92VzQ'}
find_mock.return_value = sentinel.conf_path
load_mock.side_effect = (sentinel.work_data, sentinel.file_data)
config = Configuration(cli_args)
assert config.cli_args == cli_args
assert config.defaults_file_path.endswith(
'plpacker/conf/DEFAULT_CONFIG.yaml')
assert config.config_file_path == sentinel.conf_path
assert config.data == sentinel.work_data
merge_mock.assert_called_with(sentinel.work_data,
sentinel.file_data,
cli_args)
def test_default_values(self):
# pylint: disable=no-self-use
config = Configuration({})
assert config.data['virtualenv']['python'] == 'python2.7'
assert config.data['virtualenv']['path'] is None
assert not config.data['virtualenv']['keep']
assert config.data['virtualenv']['pip']['requirements'] == []
assert config.data['virtualenv']['pip']['packages'] == []
assert config.data['virtualenv']['default_excludes'] == [
'easy_install.*',
'pip*/**',
'py-lambda-packer.yaml',
'setuptools*/**',
'wheel*/**']
assert config.data['packager']['target'] == 'py-lambda-package.zip'
assert config.data['packager']['build_path'] is None
assert not config.data['packager']['keep']
assert not config.data['packager']['followlinks']
assert config.data['packager']['includes'] == []
assert config.data['packager']['excludes'] == []
assert config.data['packager']['default_excludes'] == [
'**/*~',
'**/*.swp',
'**/.*.swp',
'**/.DS_Store',
'**/.DS_Store?',
'**/._*',
'**/.Spotlight-V100',
'**/.Trashes',
'**/Icon?',
'**/ehthumbs.db',
'**/Thumbs.db']
def test_validate_schema(self):
# pylint: disable=no-self-use
# Poor version of scheme validation.
config = Configuration({})
assert sorted(config.data.keys()) == ['packager', 'virtualenv']
assert sorted(config.data['packager'].keys()) == [
'build_path', 'default_excludes', 'excludes', 'followlinks',
'includes', 'keep', 'target']
assert sorted(config.data['virtualenv'].keys()) == [
'default_excludes', 'keep', 'path', 'pip', 'python']
assert sorted(config.data['virtualenv']['pip'].keys()) == [
'packages', 'requirements']
class TestConfigMergeDicts(object):
good_merge_data = (
({}, {}, {}),
({'a': '1', 'b': '2'},
{},
{'a': '1', 'b': '2'}),
({},
{'a': '1', 'b': '2'},
{'a': '1', 'b': '2'}),
({'a': '1', 'b': '2'},
{'a': '1', 'b': '2'},
{'a': '1', 'b': '2'}),
({'a': '1', 'b': '2'},
{'c': '3'},
{'a': '1', 'b': '2', 'c': '3'}),
({'a': {'a1': '1a'}, 'b': '2'},
{'a': {'a2': '2a'}},
{'a': {'a1': '1a', 'a2': '2a'}, 'b': '2'}),
)
@pytest.mark.parametrize("left,right,expected", good_merge_data)
def test_good_data(self, left, right, expected):
# pylint: disable=no-self-use,protected-access
config = Configuration({})
config._merge_dicts(left, right)
assert left == expected
bad_merge_data = (
({'a': {'a1': '1a'}, 'b': '2'},
{'a': '1'},
'a'),
({'a': '1'},
{'a': {'a1': '1a'}, 'b': '2'},
'a'),
({'a': {'a1': '1a'}, 'b': '2'},
{'a': {'a1': {}}},
'a.a1'),
)
@pytest.mark.parametrize("left,right,expected", bad_merge_data)
def test_bad_data(self, left, right, expected):
# pylint: disable=no-self-use,protected-access
config = Configuration({})
with pytest.raises(ValueError) as info:
config._merge_dicts(left, right)
assert str(info.value) == \
'Conflict at : {}'.format(expected)
class TestConfigMergeCliArgs(object):
def test_empty_stays_empty(self):
# pylint: disable=no-self-use,protected-access
config = Configuration({})
data_in = {}
config._merge_cli_args({}, {})
assert data_in == {}
def test_no_changes(self):
# pylint: disable=no-self-use,protected-access
config = Configuration({})
default_data_ori = copy.deepcopy(config.data)
default_data_in = copy.deepcopy(config.data)
config._merge_cli_args(default_data_in, {})
assert default_data_in == default_data_ori
def test_just_with_cli_args(self):
# pylint: disable=no-self-use,protected-access
config = Configuration({})
cli_args = self.cli_args_sentinals()
merged_data = {}
config._merge_cli_args(merged_data, cli_args)
        # Use `cli_args_sentinals` instead of `cli_args` just in case
        # `cli_args` was tampered with.
cli_args_sentinals = self.cli_args_sentinals()
assert merged_data['virtualenv']['python'] \
== cli_args_sentinals['python']
assert merged_data['virtualenv']['path'] \
== cli_args_sentinals['virtualenv_dir']
assert merged_data['virtualenv']['keep'] \
== cli_args_sentinals['keep_virtualenv']
assert merged_data['virtualenv']['pip']['requirements'] \
== cli_args_sentinals['requirements']
assert merged_data['virtualenv']['pip']['packages'] \
== cli_args_sentinals['packages']
assert merged_data['packager']['target'] \
== cli_args_sentinals['output']
assert merged_data['packager']['build_path'] \
== cli_args_sentinals['archive_dir']
assert merged_data['packager']['keep'] \
== cli_args_sentinals['keep_archive']
assert merged_data['packager']['followlinks'] \
== cli_args_sentinals['followlinks']
assert merged_data['packager']['includes'] \
== cli_args_sentinals['includes']
assert merged_data['packager']['excludes'] \
== cli_args_sentinals['excludes']
def test_default_data_with_cli_args(self):
# pylint: disable=no-self-use,protected-access
config = Configuration({})
cli_args = self.cli_args_sentinals()
merged_data = copy.deepcopy(config.data)
config._merge_cli_args(merged_data, cli_args)
        # Use `cli_args_sentinals` instead of `cli_args` just in case
        # `cli_args` was tampered with.
cli_args_sentinals = self.cli_args_sentinals()
assert merged_data['virtualenv']['python'] \
== cli_args_sentinals['python']
assert merged_data['virtualenv']['path'] \
== cli_args_sentinals['virtualenv_dir']
assert merged_data['virtualenv']['keep'] \
== cli_args_sentinals['keep_virtualenv']
assert merged_data['virtualenv']['pip']['requirements'] \
== cli_args_sentinals['requirements']
assert merged_data['virtualenv']['pip']['packages'] \
== cli_args_sentinals['packages']
assert merged_data['packager']['target'] \
== cli_args_sentinals['output']
assert merged_data['packager']['build_path'] \
== cli_args_sentinals['archive_dir']
assert merged_data['packager']['keep'] \
== cli_args_sentinals['keep_archive']
assert merged_data['packager']['followlinks'] \
== cli_args_sentinals['followlinks']
assert merged_data['packager']['includes'] \
== cli_args_sentinals['includes']
assert merged_data['packager']['excludes'] \
== cli_args_sentinals['excludes']
@patch('plpacker.config.CliArgInjector')
def test_utilizes_injector(self, injector):
# pylint: disable=no-self-use,protected-access
config = Configuration({})
config._merge_cli_args(sentinel.data, sentinel.args)
injector.assert_called_with(sentinel.data, sentinel.args)
@patch.object(CliArgInjector, 'map')
def test_calls_injector_map(self, map_mock):
# pylint: disable=protected-access
config = Configuration({})
cli_args = self.cli_args_sentinals()
config._merge_cli_args({}, cli_args)
assert map_mock.call_count == 11
@staticmethod
def cli_args_sentinals():
return {
'archive_dir': sentinel.archive_dir,
'config_file': sentinel.config_file,
'excludes': sentinel.excludes,
'followlinks': sentinel.followlinks,
'includes': sentinel.includes,
'keep_archive': sentinel.keep_archive,
'keep_virtualenv': sentinel.keep_virtualenv,
'output': sentinel.output,
'packages': sentinel.packages,
'python': sentinel.python,
'requirements': sentinel.requirements,
'virtualenv_dir': sentinel.virtualenv_dir}
class TestConfigFindConfigFile(object):
@patch('plpacker.config.expand_path')
def test_calls_expand_path_on_valid(self, expand_path_mock):
# pylint: disable=no-self-use,protected-access
expand_path_mock.return_value = sentinel.expanded_path
config = Configuration({})
path = config._find_config_file(sentinel.config_file_path)
assert path == sentinel.expanded_path
expand_path_mock.assert_called_with(sentinel.config_file_path, True)
@patch.object(Configuration, '_load_file')
def test_returns_config_in_cwd(self, load_file_mock, source_fs):
# pylint: disable=no-self-use,protected-access,unused-argument
load_file_mock.return_value = {}
os.chdir('/home/foo/src/bar-project')
config = Configuration({})
path = config._find_config_file(None)
assert path == '/home/foo/src/bar-project/py-lambda-packer.yaml'
@patch.object(Configuration, '_load_file')
def test_nothing_found(self, load_file_mock, source_fs):
# pylint: disable=no-self-use,protected-access,unused-argument
load_file_mock.return_value = {}
os.chdir('/home')
config = Configuration({})
path = config._find_config_file(None)
assert path is None
class TestConfigMerge(object):
@patch.object(Configuration, '_merge_cli_args')
@patch.object(Configuration, '_merge_dicts')
def test_everything_provided(self, merge_dicts_mock, merge_cli_args_mock):
# pylint: disable=no-self-use,protected-access
merge_dicts_mock.return_value = sentinel.merge_dicts
merge_cli_args_mock.return_value = sentinel.merge_cli_args
config = Configuration({})
config._merge(sentinel.work_data, sentinel.file_data,
sentinel.cli_args)
merge_dicts_mock.assert_called_with(sentinel.work_data,
sentinel.file_data)
merge_cli_args_mock.assert_called_with(sentinel.work_data,
sentinel.cli_args)
@patch.object(Configuration, '_merge_cli_args')
@patch.object(Configuration, '_merge_dicts')
def test_file_data_missing(self, merge_dicts_mock, merge_cli_args_mock):
# pylint: disable=no-self-use,protected-access
merge_cli_args_mock.return_value = sentinel.merge_cli_args
config = Configuration({})
config._merge(sentinel.work_data, None, sentinel.cli_args)
merge_dicts_mock.assert_not_called()
merge_cli_args_mock.assert_called_with(sentinel.work_data,
sentinel.cli_args)
@patch.object(Configuration, '_merge_cli_args')
@patch.object(Configuration, '_merge_dicts')
def test_cli_args_missing(self, merge_dicts_mock, merge_cli_args_mock):
# pylint: disable=no-self-use,protected-access
merge_cli_args_mock.return_value = sentinel.merge_cli_args
config = Configuration({})
config._merge(sentinel.work_data, sentinel.file_data, None)
merge_dicts_mock.assert_called_with(sentinel.work_data,
sentinel.file_data)
merge_cli_args_mock.assert_not_called()
@patch.object(Configuration, '_merge_cli_args')
@patch.object(Configuration, '_merge_dicts')
def test_everything_missing(self, merge_dicts_mock, merge_cli_args_mock):
# pylint: disable=no-self-use,protected-access
merge_cli_args_mock.return_value = sentinel.merge_cli_args
config = Configuration({})
config._merge(sentinel.work_data, None, None)
merge_dicts_mock.assert_not_called()
merge_cli_args_mock.assert_not_called()
| {
"repo_name": "digitalrounin/py-lambda-packer",
"path": "tests/test_config.py",
"copies": "1",
"size": "13473",
"license": "mit",
"hash": -804239002096910500,
"line_mean": 38.9792284866,
"line_max": 78,
"alpha_frac": 0.5783418689,
"autogenerated": false,
"ratio": 3.7962806424344886,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4874622511334489,
"avg_score": null,
"num_lines": null
} |
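The good/bad parametrized cases above pin down the behavior of _merge_dicts: nested dicts merge recursively, and a dict/non-dict collision raises ValueError naming the dotted path. The following is a hedged reconstruction of those semantics, an illustrative stand-in rather than the plpacker implementation itself.
def merge_dicts(left, right, path=()):
    for key, value in right.items():
        if (key in left and
                isinstance(left[key], dict) != isinstance(value, dict)):
            # dict vs. non-dict collision: report the dotted path
            raise ValueError('Conflict at : {}'.format('.'.join(path + (key,))))
        if key in left and isinstance(value, dict):
            merge_dicts(left[key], value, path + (key,))
        else:
            left[key] = value

left = {'a': {'a1': '1a'}, 'b': '2'}
merge_dicts(left, {'a': {'a2': '2a'}})
print(left)   # {'a': {'a1': '1a', 'a2': '2a'}, 'b': '2'}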
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ccdproc import CCDData
import argparse
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
# disables the s key event for saving.
# plt.rcParams['keymap.save'] = ''
import astropy.units as u
import glob
import sys
import re
from goodman_spec.wsbuilder import ReadWavelengthSolution
def get_args(arguments=None):
parser = argparse.ArgumentParser(
description="Plots image or spectrum")
parser.add_argument('file',
action='store',
nargs='+',
help="File containing fits data.")
args = parser.parse_args(args=arguments)
return args
class DataPlotter(object):
def __init__(self, args):
self.args = args
self.fig = None
self.ax = None
self.file = None
def __call__(self, in_file, save=False):
self.file = in_file
self.fig, self.ax = plt.subplots()
# read data and get its wavelength solution
ccd = CCDData.read(self.file, unit=u.adu)
wcs_reader = ReadWavelengthSolution(header=ccd.header,
data=ccd.data)
wavelength, intensity = wcs_reader()
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
plt.title('{:s}\n{:s}'.format(self.file, ccd.header['OBJECT']))
self.ax.plot(wavelength, intensity, color='k', label='Data')
self.ax.axvline(6562.8, color='r')
self.ax.set_xlim((wavelength[0], wavelength[-1]))
self.ax.set_ylabel('Intensity (ADU)')
self.ax.set_xlabel('Wavelength (Angstrom)')
plt.legend(loc='best')
plt.subplots_adjust(left=0.05,
right=0.99,
top=0.96,
bottom=0.04,
hspace=0.17,
wspace=0.11)
# plt.tight_layout()
if not save:
self.fig.canvas.mpl_connect('key_press_event', self.key_pressed)
plt.show()
# else:
# output = re.sub('.fits', '.png', self.file)
# plt.savefig(output, dpi=600)
def key_pressed(self, event):
# print(event.key)
if event.key == 'q':
plt.close(self.fig)
sys.exit()
# elif event.key == 's':
# self.__call__(in_file=self.file, save=True)
elif event.key == 'n':
plt.close(self.fig)
if __name__ == '__main__':
args = get_args()
    if isinstance(args.file, list) and len(args.file) > 1:
file_list = args.file
elif len(args.file) == 1:
# print(args.file)
file_list = glob.glob(args.file[0])
#print(file_list)
plotter = DataPlotter(args=args)
for image in file_list:
        plotter(in_file=image)
| {
"repo_name": "simontorres/goodman",
"path": "dev-tools/plot-fits-file.py",
"copies": "1",
"size": "2941",
"license": "bsd-3-clause",
"hash": 7762499673119009000,
"line_mean": 27.2884615385,
"line_max": 76,
"alpha_frac": 0.5470928256,
"autogenerated": false,
"ratio": 3.69937106918239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9721229249676641,
"avg_score": 0.0050469290211498435,
"num_lines": 104
} |
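The DataPlotter above drives navigation entirely through matplotlib key events ('q' quits, 'n' closes the current figure so the loop advances to the next file). A minimal self-contained sketch of that wiring, using only standard matplotlib calls:
import matplotlib.pyplot as plt

def on_key(event):
    # 'q' also calls sys.exit() in the original; here both just close
    if event.key in ('q', 'n'):
        plt.close(event.canvas.figure)

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
fig.canvas.mpl_connect('key_press_event', on_key)
plt.show()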
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from citeproc.py2compat import *
import re
import unicodedata
import sys
from functools import reduce, cmp_to_key
from operator import itemgetter
from lxml import etree
from . import NAMES, DATES, NUMBERS
from .source import VariableError, DateRange, LiteralDate
from .string import String
# Base class
class SomewhatObjectifiedElement(etree.ElementBase):
nsmap = {'cs': 'http://purl.org/net/xbiblio/csl',
'xml': 'http://www.w3.org/XML/1998/namespace'}
# TODO: what about multiple instances of the same name?
def __getattr__(self, name):
return self.find('cs:' + name, self.nsmap)
class CitationStylesElement(SomewhatObjectifiedElement):
_default_options = {# global options
'initialize-with-hyphen': 'true',
'page-range-format': None,
'demote-non-dropping-particle': 'display-and-sort',
# inheritable name(s) options
'and': None,
'delimiter-precedes-et-al': 'contextual',
'delimiter-precedes-last': 'contextual',
'et-al-min': 0,
'et-al-use-first': 1,
'et-al-subsequent-min': 0,
'et-al-subsequent-use-first': 1,
'et-al-use-last': 'false',
'initialize-with': None,
'name-as-sort-order': None,
'sort-separator': ', ',
'name-form': 'long',
'name-delimiter': ', ',
'names-delimiter': ''}
def get_root(self):
return self.getroottree().getroot()
def xpath_search(self, expression):
return self.xpath(expression, namespaces=self.nsmap)
@property
def loc(self):
full_xpath = self.getroottree().getpath(self)
xpath = ''
tree = []
for i, node in enumerate(full_xpath.split('/')[1:]):
xpath += '/' + node
element = self.xpath(xpath)[0]
namespace, tag = element.tag.split('}', 1)
attribs = ''.join(' {}="{}"'.format(key, value)
for key, value in element.attrib.items())
tree.append('{:>4}: {}<{}{}>'.format(element.sourceline,
i * ' ', tag, attribs))
print('\n'.join(tree))
def get_option(self, name):
return self.get(name, self._default_options[name])
def get_macro(self, name):
expression = "cs:macro[@name='{}'][1]".format(name)
return self.get_root().xpath_search(expression)[0]
def get_layout(self):
return self.xpath_search('./ancestor-or-self::cs:layout[1]')[0]
def get_formatter(self):
if isinstance(self.get_root(), Locale):
return self.get_root().style.formatter
else:
return self.get_root().formatter
def preformat(self, text):
return self.get_formatter().preformat(text)
def unicode_character(self, name):
return self.preformat(unicodedata.lookup(name))
def render(self, *args, **kwargs):
return self.markup(self.process(*args, **kwargs))
# TODO: Locale methods
def get_term(self, name, form=None):
if isinstance(self.get_root(), Locale):
return self.get_root().get_term(name, form)
else:
for locale in self.get_root().locales:
try:
return locale.get_term(name, form)
except IndexError: # TODO: create custom exception
continue
def get_date(self, form):
for locale in self.get_root().locales:
try:
return locale.get_date(form)
except IndexError:
continue
def get_locale_option(self, name):
for locale in self.get_root().locales:
try:
return locale.get_option(name)
except IndexError:
continue
# Top level elements
class Style(CitationStylesElement):
_locale_fallback = {'de-AT': 'de-DE',
'de-CH': 'de-DE',
'en-GB': 'en-US',
'pt-BR': 'pt-PT',
'zh-TW': 'zh-CN'}
def set_locale_list(self, output_locale, validate=True):
"""Set up list of locales in which to search for localizable units"""
from .frontend import CitationStylesLocale
def search_locale(locale):
return self.xpath_search('./cs:locale[@xml:lang="{}"]'
.format(locale))[0]
self.locales = []
# 1) (in-style locale) chosen dialect
try:
self.locales.append(search_locale(output_locale))
except IndexError:
pass
# 2) (in-style locale) matching language
language = output_locale.split('-')[0]
try:
self.locales.append(search_locale(language))
except IndexError:
pass
# 3) (in-style locale) no language set
try:
expr = './cs:locale[not(@xml:lang)]'
self.locales.append(self.xpath_search(expr)[0])
except IndexError:
pass
# 4) (locale files) chosen dialect
try:
self.locales.append(CitationStylesLocale(output_locale,
validate=validate).root)
except ValueError:
pass
# 5) (locale files) fall back to primary language dialect
try:
fallback_locale = self._locale_fallback[output_locale]
self.locales.append(CitationStylesLocale(fallback_locale,
validate=validate).root)
except KeyError:
pass
# 6) (locale files) default locale (en-US)
if output_locale != 'en-US':
self.locales.append(CitationStylesLocale('en-US',
validate=validate).root)
for locale in self.locales:
locale.style = self
class Locale(CitationStylesElement):
_default_options = {'limit-day-ordinals-to-day-1': 'false',
'punctuation-in-quote': 'false'}
def get_term(self, name, form=None):
attributes = "@name='{}'".format(name)
if form is not None:
attributes += " and @form='{}'".format(form)
else:
attributes += " and not(@form)"
expr = './cs:term[{}]'.format(attributes)
try:
return self.terms.xpath_search(expr)[0]
except AttributeError:
raise IndexError
def get_date(self, form):
expr = "./cs:date[@form='{}']".format(form)
return self.xpath_search(expr)[0]
def get_option(self, name):
options = self.find('cs:style-options', self.nsmap)
if options is None:
raise IndexError
return options.get(name, self._default_options[name])
def get_formatter(self):
return self.style.formatter
class FormattingInstructions(object):
def get_option(self, name):
if name in self._default_options:
return self.get(name, self._default_options[name])
else:
return self.get(name, self.get_root().get_option(name))
def render(self, reference):
raise NotImplementedError
class Citation(FormattingInstructions, CitationStylesElement):
_default_options = {# disambiguation
'disambiguate-add-names': False,
'disambiguate-add-givenname': False,
'givenname-disambiguation-rule': 'all-names',
'disambiguate-add-year-suffix': False,
# citation collapsing
'collapse': None,
'year-suffix-delimiter': None,
'after-collapse-delimiter': None,
# note distance
'near-note-distance': 5}
def render(self, citation, cites, callback):
self.cites = cites
return self.layout.render_citation(citation, callback)
class Bibliography(FormattingInstructions, CitationStylesElement):
_default_options = {# whitespace
'hanging-indent': False,
'second-field-align': None,
'line-spacing': 1,
'entry-spacing': 1,
# reference grouping
'subsequent-author-substitute': None}
def sort(self, citation_items):
return self.layout.sort_bibliography(citation_items)
def render(self, citation_items):
return self.layout.render_bibliography(citation_items)
# Style behavior
class Formatted(object):
def format(self, string):
if isinstance(string, (int, float)):
string = str(string)
text = self.font_style(string)
text = self.font_variant(text)
text = self.font_weight(text)
text = self.text_decoration(text)
text = self.vertical_align(text)
return text
def font_style(self, text):
formatter = self.get_formatter()
font_style = self.get('font-style', 'normal')
if font_style == 'normal':
formatted = text
elif font_style == 'italic':
formatted = formatter.Italic(text)
elif font_style == 'oblique':
formatted = formatter.Oblique(text)
return formatted
def font_variant(self, text):
formatter = self.get_formatter()
font_variant = self.get('font-variant', 'normal')
if font_variant == 'normal':
formatted = text
elif font_variant == 'small-caps':
formatted = formatter.SmallCaps(text)
return formatted
def font_weight(self, text):
formatter = self.get_formatter()
font_weight = self.get('font-weight', 'normal')
if font_weight == 'normal':
formatted = text
elif font_weight == 'bold':
formatted = formatter.Bold(text)
elif font_weight == 'light':
formatted = formatter.Light(text)
return formatted
def text_decoration(self, text):
formatter = self.get_formatter()
text_decoration = self.get('text-decoration', 'none')
if text_decoration == 'none':
formatted = text
elif text_decoration == 'underline':
formatted = formatter.Underline(text)
return formatted
def vertical_align(self, text):
formatter = self.get_formatter()
vertical_align = self.get('vertical-align', 'baseline')
if vertical_align == 'baseline':
formatted = text
elif vertical_align == 'sup':
formatted = formatter.Superscript(text)
elif vertical_align == 'sub':
formatted = formatter.Subscript(text)
return formatted
class Affixed(object):
def wrap(self, string):
if string is not None:
prefix = self.get('prefix', '')
suffix = self.get('suffix', '')
return prefix + string + suffix
return None
class Delimited(object):
def join(self, strings, default_delimiter=''):
delimiter = self.get('delimiter', default_delimiter)
try:
text = reduce(lambda a, b: a + delimiter + b,
filter(lambda s: s is not None, strings))
        except TypeError:  # reduce() over an empty (all-None) sequence
text = String('')
return text
class Displayed(object):
pass
class Quoted(object):
def quote(self, string):
piq = self.get_locale_option('punctuation-in-quote').lower() == 'true'
if self.get('quotes', 'false').lower() == 'true':
open_quote = self.get_term('open-quote').single
close_quote = self.get_term('close-quote').single
string = open_quote + string + close_quote
## quoted_string = QuotedString(string, open_quote, close_quote, piq)
return string
class StrippedPeriods(object):
def strip_periods(self, string):
strip_periods = self.get('strip-periods', 'false').lower() == 'true'
if strip_periods:
string = string.replace('.', '')
return string
class TextCased(object):
_stop_words = ['a', 'an', 'and', 'as', 'at', 'but', 'by', 'down', 'for',
'from', 'in', 'into', 'nor', 'of', 'on', 'onto', 'or',
'over', 'so', 'the', 'till', 'to', 'up', 'via', 'with',
'yet']
def case(self, text, language=None):
text_case = self.get('text-case')
if text_case is not None:
if language != 'en' and text_case == 'title':
text_case = 'sentence'
if text_case == 'lowercase':
text = text.lower()
elif text_case == 'uppercase':
text = text.upper()
elif text_case == 'capitalize-first':
text = text.capitalize_first()
elif text_case == 'capitalize-all':
output = []
for word in text.words():
word = word.capitalize_first()
output.append(word)
text = ' '.join(output)
elif text_case == 'title':
output = []
prev = ':'
for word in text.words():
if not text.isupper() and not word.isupper():
word = word.soft_lower()
if (str(word) not in self._stop_words or
prev in (':', '.')):
word = word.capitalize_first()
prev = word[-1]
output.append(word)
text = ' '.join(output)
elif text_case == 'sentence':
output = []
for i, word in enumerate(text.words()):
if not text.isupper() and not word.isupper():
word = word.soft_lower()
if i == 0:
word = word.capitalize_first()
output.append(word)
text = ' '.join(output)
return text
# Locale elements
class Term(CitationStylesElement):
@property
def single(self):
try:
text = self.find('cs:single', self.nsmap).text
except AttributeError:
text = self.text
text = self.preformat(text or '')
return String(text)
@property
def multiple(self):
try:
text = self.find('cs:multiple', self.nsmap).text
except AttributeError:
text = self.text
text = self.preformat(text or '')
return String(text)
# Sorting elements
class Sort(CitationStylesElement):
def sort(self, items, context):
# custom sort function to push items with None keys to bottom
def multi_key_sort(items, keys, descending):
lst = zip(items, *keys)
comparers = [(itemgetter(i + 1), descending[i])
for i in range(len(keys))]
def mycmp(left, right):
for getter, desc in comparers:
left_key, right_key = getter(left), getter(right)
if left_key is not None and right_key is not None:
try:
left_key = str(left_key.lower())
right_key = str(right_key.lower())
except AttributeError:
pass
try:
left_key, right_key = (int(str(left_key)),
int(str(right_key)))
except ValueError:
pass
result = (left_key > right_key) - (left_key < right_key)
if result:
return -1 * result if desc else result
elif left_key is not None:
return -1
elif right_key is not None:
return 1
else:
continue
else:
return 0
sorted_lst = sorted(lst, key=cmp_to_key(mycmp))
return [item[0] for item in sorted_lst]
sort_descending = []
sort_keys = []
for key in self.findall('cs:key', self.nsmap):
descending = key.get('sort', 'ascending').lower() == 'descending'
sort_descending.append(descending)
sort_keys.append(key.sort_keys(items, context))
return multi_key_sort(items, sort_keys, sort_descending)
class Key(CitationStylesElement):
def sort_keys(self, items, context):
if 'variable' in self.attrib:
variable = self.get('variable').replace('-', '_')
if variable in NAMES:
sort_keys = [self._format_name(item, variable)
for item in items]
elif variable in DATES:
sort_keys = []
for item in items:
date = item.reference.get(variable)
if date is not None:
sort_keys.append(date.sort_key())
else:
sort_keys.append(None)
elif variable in NUMBERS:
sort_keys = [self._format_number(item, variable)
for item in items]
elif variable == 'citation_number':
sort_keys = [item.number for item in items]
else:
sort_keys = [item.get_field(variable) for item in items]
elif 'macro' in self.attrib:
layout = context.get_layout()
# override name options
sort_options = {'name-as-sort-order': 'all'}
for option in ('names-min', 'names-use-first', 'names-use-last'):
if option in self.attrib:
name = option.replace('names', 'et-al')
sort_options[name] = self.get(option)
macro = self.get_macro(self.get('macro'))
sort_keys = []
for item in items:
layout.repressed = {}
sort_key = macro.render(item, context=context,
sort_options=sort_options)
sort_keys.append(sort_key)
return sort_keys
def _format_name(self, item, variable):
names = item.reference.get(variable)
if names is not None:
output = []
for name in names:
demote_ndp = self.get_root().get('demote-non-dropping-particle',
'display-and-sort').lower()
sort_separator = self._default_options['sort-separator']
# TODO: encapsulate in function (to share with Name)
given, family, dp, ndp, suffix = name.parts()
if demote_ndp in ('sort-only', 'display-and-sort'):
given = ' '.join([n for n in (given, dp, ndp) if n])
else:
family = ' '.join([n for n in (ndp, family) if n])
given = ' '.join([n for n in (given, dp) if n])
order = family, given, suffix
output.append(sort_separator.join([n for n in order if n]))
return ';'.join(output)
else:
return None
def _format_number(self, item, variable):
date = item.reference.get(variable)
if date is not None:
try:
return str(Number.re_numeric.match(date).group(1))
except AttributeError:
return date
else:
return None
# Rendering elements
class Parent(object):
def calls_variable(self):
return any([child.calls_variable() for child in self.getchildren()])
def process_children(self, item, **kwargs):
output = []
for child in self.iterchildren():
try:
text = child.process(item, **kwargs)
if text is not None:
output.append(text)
except VariableError:
pass
if output:
return ''.join(output)
else:
return None
def render_children(self, item, **kwargs):
output = []
for child in self.iterchildren():
try:
text = child.render(item, **kwargs)
if text is not None:
output.append(text)
except VariableError:
pass
if output:
return reduce(lambda a, b: a + b, output)
else:
return None
class Macro(CitationStylesElement, Parent):
def process(self, item, context=None, sort_options=None):
return self.process_children(item, context=context,
sort_options=sort_options)
def render(self, item, context=None, sort_options=None):
return self.render_children(item, context=context,
sort_options=sort_options)
class Layout(CitationStylesElement, Parent, Formatted, Affixed, Delimited):
def render_citation(self, citation, callback):
# first sort citation items according to bibliography order
bibliography = citation.bibliography
good_cites = [cite for cite in citation.cites if not cite.is_bad()]
bad_cites = [cite for cite in citation.cites if cite.is_bad()]
good_cites.sort(key=lambda item: bibliography.keys.index(item.key))
# sort using citation/sort element
if self.getparent().sort is not None:
good_cites = self.getparent().sort.sort(good_cites, self)
out = []
for item in good_cites:
self.repressed = {}
prefix = item.get('prefix', '')
suffix = item.get('suffix', '')
try:
output = self.render_children(item)
if output is not None:
text = prefix + output + suffix
out.append(text)
self.getparent().cites.append(item)
except VariableError:
pass
for item in bad_cites:
callback_value = callback(item)
out.append(callback_value or '{}?'.format(item.key))
return self.format(self.wrap(self.join(out)))
def sort_bibliography(self, citation_items):
sort = self.getparent().find('cs:sort', self.nsmap)
if sort is not None:
citation_items = sort.sort(citation_items, self)
return citation_items
def render_bibliography(self, citation_items):
output_items = []
for item in citation_items:
self.repressed = {}
text = self.format(self.wrap(self.render_children(item)))
if text is not None:
output_items.append(text)
return output_items
class Text(CitationStylesElement, Formatted, Affixed, Quoted, TextCased,
StrippedPeriods):
generated_variables = ('year-suffix', 'citation-number')
def calls_variable(self):
if 'variable' in self.attrib:
return self.get('variable') not in self.generated_variables
elif 'macro' in self.attrib:
return self.get_macro(self.get('macro')).calls_variable()
else:
return False
def render(self, *args, **kwargs):
text, language = self.process(*args, **kwargs)
return self.markup(text, language)
def process(self, item, context=None, **kwargs):
if context is None:
context = self
try:
language = item.reference.language[:2]
except VariableError:
language = self.get_root().get('default-locale', 'en')[:2]
if 'variable' in self.attrib:
text = self._variable(item, context)
elif 'macro' in self.attrib:
text = self.get_macro(self.get('macro')).render(item, context)
elif 'term' in self.attrib:
text = self._term(item)
elif 'value' in self.attrib:
text = String(self.preformat(self.get('value')))
return text, language
def _variable(self, item, context):
variable = self.get('variable')
repressed = context.get_layout().repressed
if self.tag in repressed and variable in repressed[self.tag]:
return None
if self.get('form') == 'short':
short_variable = variable + '-short'
if short_variable.replace('-', '_') in item.reference:
variable = short_variable
if variable == 'page':
text = self._page(item, context)
elif variable == 'citation-number':
text = item.number
elif variable == 'locator':
en_dash = self.unicode_character('EN DASH')
text = str(item.locator.identifier).replace('-', en_dash)
elif variable == 'page-first':
text = str(item.reference.page.first)
else:
text = item.reference[variable.replace('-', '_')]
return text
def _page(self, item, context):
page = item.reference.page
str_first = str(page.first)
text = str_first
if 'last' in page:
str_last = str(page.last)
text += self.unicode_character('EN DASH')
if len(str_first) != len(str_last):
text += str_last
else:
range_fmt = self.get_root().get_option('page-range-format')
text += self._page_format_last(str_first, str_last, range_fmt)
return text
@staticmethod
def _page_format_last(first, last, range_format):
def find_common(first, last):
for count, (f, l) in enumerate(zip(first, last)):
if f != l:
return count
return count + 1
common = find_common(first, last)
if range_format == 'chicago':
            m = re.search(r'\d+', first)
first_number = int(m.group())
if first_number < 100 or first_number % 100 == 0:
range_format = 'expanded'
elif len(first) >= 4 and common < 2:
range_format = 'expanded'
elif first_number % 100 in range(1, 10):
range_format = 'minimal'
elif first_number % 100 in range(10, 100):
range_format = 'minimal-two'
if range_format in ('expanded', None):
index = 0
elif range_format == 'minimal':
index = common
elif range_format == 'minimal-two':
index = min(common, len(first) - 2)
return last[index:]
def _term(self, item):
form = self.get('form', 'long')
plural = self.get('plural', 'false').lower() == 'true'
if form == 'long':
form = None
term = self.get_term(self.get('term'), form)
if plural:
text = term.multiple
else:
text = term.single
return text
def markup(self, text, language):
if text:
tmp = self.format(self.case(self.strip_periods(text), language))
return self.wrap(self.quote(tmp))
else:
return None
class Date(CitationStylesElement, Parent, Formatted, Affixed, Delimited):
def calls_variable(self):
return True
def is_locale_date(self):
expr = './ancestor::cs:locale[1]'
try:
self.xpath_search(expr)[0]
return True
except IndexError:
return False
def render_single_date(self, date, show_parts=None, context=None):
form = self.get('form')
if context != self:
parts = self.parts(date, show_parts, context)
else:
parts = self.parts(date, show_parts)
if parts:
style_context = context if self.is_locale_date() else self
return style_context.join(parts)
else:
return None
def render_date_range(self, date_range, show_parts=None, context=None):
same_show_parts = []
if date_range.end.is_nil():
same = None
diff_begin = self.render_single_date(date_range.begin, show_parts,
context)
diff_end = ''
else:
if date_range.begin.year == date_range.end.year:
show_parts.remove('year')
same_show_parts.append('year')
try:
if ('month' in show_parts and
date_range.begin.month == date_range.end.month):
show_parts.remove('month')
same_show_parts.append('month')
try:
if ('day' in show_parts and
date_range.begin.day == date_range.end.day):
show_parts.remove('day')
same_show_parts.append('day')
except AttributeError:
show_parts.remove('day')
except AttributeError:
show_parts.remove('month')
same = self.render_single_date(date_range.end, same_show_parts,
context)
diff_begin = self.render_single_date(date_range.begin, show_parts,
context)
diff_end = self.render_single_date(date_range.end, show_parts,
context)
        if not (diff_begin and diff_end):
return None
diff = (diff_begin.rstrip() + self.unicode_character('EN DASH') +
diff_end)
if same:
text = context.join([diff, same.rstrip()])
else:
text = diff
return text
def process(self, item, variable=None, show_parts=None, context=None,
**kwargs):
if variable is None:
variable = self.get('variable')
if show_parts is None:
show_parts = ['year', 'month', 'day']
if context is None:
context = self
form = self.get('form')
date_parts = self.get('date-parts')
if not self.is_locale_date() and form is not None:
localized_date = self.get_date(form)
if date_parts is not None:
show_parts = date_parts.split('-')
return localized_date.render(item, variable,
show_parts=show_parts, context=self)
else:
date_or_range = item.reference[variable.replace('-', '_')]
if not date_or_range:
text = None
elif isinstance(date_or_range, LiteralDate):
text = date_or_range.text
elif isinstance(date_or_range, DateRange):
text = self.render_date_range(date_or_range, show_parts,
context)
else:
text = self.render_single_date(date_or_range, show_parts,
context)
if text is not None:
style_context = context if self.is_locale_date() else self
return style_context.wrap(text)
else:
return None
def parts(self, date, show_parts, context=None):
output = []
for part in self.iterchildren():
if part.get('name') in show_parts:
try:
part_text = part.render(date, context)
if part_text is not None:
output.append(part_text)
except VariableError:
pass
return output
def markup(self, text):
# TODO: fix
return text
class Date_Part(CitationStylesElement, Formatted, Affixed, TextCased,
StrippedPeriods):
def process(self, date, context=None):
name = self.get('name')
range_delimiter = self.get('range-delimiter', '-')
attrib = self.attrib
if context is None:
context = self
try:
expr = './cs:date-part[@name="{}"]'.format(name)
attrib.update(dict(context.xpath_search(expr)[0].attrib))
except (AttributeError, IndexError):
pass
if name == 'day':
form = self.get('form', 'numeric')
if (form == 'ordinal'
and self.get_locale_option('limit-day-ordinals-to-day-1')
.lower() == 'true'
and date.day > 1):
form = 'numeric'
if form == 'numeric':
text = date.day
elif form == 'numeric-leading-zeros':
text = '{:02}'.format(date.day)
elif form == 'ordinal':
text = to_ordinal(date.day, context)
elif name == 'month':
form = self.get('form', 'long')
        strip_periods = self.get('strip-periods', False)
try:
index = date.month
term = 'month'
except VariableError:
index = date.season
term = 'season'
if form == 'long':
text = context.get_term('{}-{:02}'.format(term, index)).single
elif form == 'short':
term = context.get_term('{}-{:02}'.format(term, index), 'short')
text = term.single
else:
assert term == 'month'
if form == 'numeric':
text = '{}'.format(index)
elif form == 'numeric-leading-zeros':
text = '{:02}'.format(index)
elif name == 'year':
form = self.get('form', 'long')
if form == 'long':
text = str(abs(date.year))
if date.year < 0:
text += context.get_term('bc').single
elif date.year < 1000:
text += context.get_term('ad').single
elif form == 'short':
text = str(date.year)[-2:]
return text
def markup(self, text):
if text:
return self.wrap(self.format(self.case(self.strip_periods(text))))
else:
return None
class Number(CitationStylesElement, Formatted, Affixed, Displayed, TextCased,
StrippedPeriods):
re_numeric = re.compile(r'^(\d+).*')
re_range = re.compile(r'^(\d+)\s*-\s*(\d+)$')
def calls_variable(self):
return True
def process(self, item, context=None, **kwargs):
form = self.get('form', 'numeric')
variable = self.get('variable')
if variable == 'locator':
try:
variable = item.locator.identifier
except KeyError:
return None
elif variable == 'page-first':
variable = item.reference.page.first
else:
variable = item.reference[variable]
try:
first, last = map(int, self.re_range.match(str(variable)).groups())
first = self.format_number(first, form)
last = self.format_number(last, form)
text = first + self.unicode_character('EN DASH') + last
except AttributeError:
try:
number = int(self.re_numeric.match(str(variable)).group(1))
text = self.format_number(number, form)
except AttributeError:
text = variable
except TypeError:
text = str(variable)
return text
def format_number(self, number, form):
if form == 'numeric':
text = str(number)
        elif form == 'ordinal' or (form == 'long-ordinal' and number > 10):
text = to_ordinal(number, self)
elif form == 'long-ordinal':
text = self.get_term('long-ordinal-{:02}'.format(number)).single
elif form == 'roman':
text = romanize(number).lower()
return text
def markup(self, text):
if text:
return self.wrap(self.format(self.case(self.strip_periods(text))))
else:
return None
class Names(CitationStylesElement, Parent, Formatted, Affixed, Delimited):
def calls_variable(self):
return True
def get_parent_delimiter(self, context=None):
expr = './ancestor::*[self::cs:citation or self::cs:bibliography][1]'
if context is None:
context = self
parent = context.xpath_search(expr)[0]
return parent.get_option('names-delimiter')
def substitute(self):
expr = './cs:substitute[1]'
try:
result = self.xpath_search(expr)[0]
except IndexError:
result = None
return result
def process(self, item, names_context=None, context=None, **kwargs):
if context is None:
context = self
if names_context is None:
names_context = self
roles = self.get('variable').split()
try:
ed_trans = (set(roles) == set(['editor', 'translator']) and
item.reference.editor == item.reference.translator and
self.get_term('editortranslator').getchildren())
if ed_trans:
roles = ['editor']
except VariableError:
ed_trans = False
output = []
for role in roles:
if role in item.reference:
name_elem = names_context.name
if name_elem is None:
name_elem = Name()
names_context.insert(0, name_elem)
text = name_elem.render(item, role, context=context, **kwargs)
plural = len(item.reference[role]) > 1
try:
if ed_trans:
role = 'editortranslator'
label_element = names_context.label
label = label_element.render(item, role, plural, **kwargs)
if label is not None:
if label_element is names_context.getchildren()[0]:
text = label + text
else:
text = text + label
except AttributeError:
pass
output.append(text)
if output:
try:
total = sum(output)
except TypeError:
is_int = False
else:
is_int = isinstance(total, int)
if is_int:
text = str(total) if total > 0 else None
else:
text = self.join(output, self.get_parent_delimiter(context))
else:
substitute = self.substitute()
if substitute is not None:
text = substitute.render(item, context=context, **kwargs)
try:
return text
except NameError:
raise VariableError
def markup(self, text):
if text:
return self.wrap(self.format(text))
else:
return None
class Name(CitationStylesElement, Formatted, Affixed, Delimited):
def get_option(self, name, context=None, sort_options=None):
try:
value = sort_options[name]
except (TypeError, KeyError):
expr = ('./ancestor::*[self::cs:citation or '
'self::cs:bibliography][1]')
if context is None:
context = self
parent = context.xpath_search(expr)[0]
if name in ('form', 'delimiter'):
value = self.get(name, parent.get_option('name-' + name))
else:
value = self.get(name, parent.get_option(name))
if name in ('initialize-with-hyphen', 'et-al-use-last'):
value = value.lower() == 'true'
elif name.startswith('et-al'):
value = int(value)
return value
def et_al(self):
expr = './following-sibling::cs:et-al[1]'
try:
result = self.xpath_search(expr)[0].render()
except IndexError:
result = self.get_term('et-al').single
return result
def process(self, item, variable, context=None, sort_options=None, **kwargs):
def get_option(name):
return self.get_option(name, context, sort_options)
and_ = get_option('and')
delimiter = get_option('delimiter')
delimiter_precedes_et_al = get_option('delimiter-precedes-et-al')
delimiter_precedes_last = get_option('delimiter-precedes-last')
et_al_min = get_option('et-al-min')
et_al_use_first = get_option('et-al-use-first')
et_al_subseq_min = get_option('et-al-subsequent-min')
et_al_subseq_use_first = get_option('et-al-subsequent-use-first')
et_al_use_last = get_option('et-al-use-last')
initialize_with = get_option('initialize-with')
name_as_sort_order = get_option('name-as-sort-order')
sort_separator = get_option('sort-separator')
form = get_option('form')
demote_ndp = get_option('demote-non-dropping-particle')
def format_name_parts(given, family):
for part in self.findall('cs:name-part', self.nsmap):
given, family = part.format_part(given, family)
return given, family
names = item.reference.get(variable, [])
if and_ == 'text':
and_term = self.get_term('and').single
elif and_ == 'symbol':
and_term = self.preformat('&')
et_al = self.et_al()
output = []
if form == 'count':
count = min(len(names), et_al_use_first)
output.append(count)
return sum(output)
else:
et_al_truncate = (len(names) > 1 and et_al_min and
len(names) >= et_al_min)
et_al_last = et_al_use_last and et_al_use_first <= et_al_min - 2
if et_al_truncate:
if et_al_last:
names = names[:et_al_use_first] + [names[-1]]
else:
names = names[:et_al_use_first]
for i, name in enumerate(names):
given, family, dp, ndp, suffix = name.parts()
if given is not None and initialize_with is not None:
given = self.initialize(given, initialize_with, context)
if form == 'long':
if (name_as_sort_order == 'all' or
(name_as_sort_order == 'first' and i == 0)):
if demote_ndp in ('never', 'sort-only'):
family = ' '.join([n for n in (ndp, family) if n])
given = ' '.join([n for n in (given, dp) if n])
else:
given = ' '.join([n for n in (given, dp, ndp) if n])
given, family = format_name_parts(given, family)
order = family, given, suffix
text = sort_separator.join([n for n in order if n])
else:
family = ' '.join([n for n in (dp, ndp, family) if n])
given, family = format_name_parts(given, family)
order = given, family, suffix
text = ' '.join([n for n in order if n])
elif form == 'short':
family = ' '.join([n for n in (ndp, family) if n])
given, family = format_name_parts(given, family)
text = family
output.append(text)
if et_al_truncate and et_al:
if et_al_last:
ellipsis = self.unicode_character('horizontal ellipsis')
output[-1] = ellipsis + ' ' + output[-1]
text = self.join(output, delimiter)
elif (delimiter_precedes_et_al == 'always' or
(delimiter_precedes_et_al == 'contextual' and
len(output) >= 2)):
output.append(et_al)
text = self.join(output, delimiter)
else:
text = self.join(output, delimiter) + ' ' + et_al
elif and_ is not None and len(output) > 1:
text = self.join(output[:-1], ', ')
if (delimiter_precedes_last == 'always' or
(delimiter_precedes_last == 'contextual' and
len(output) > 2)):
text = self.join([text, ''])
else:
text += ' '
text += '{} '.format(and_term) + output[-1]
else:
text = self.join(output, delimiter)
return text
def initialize(self, given, mark, context):
if self.get_option('initialize-with-hyphen', context):
hyphen_parts = given.split('-')
else:
hyphen_parts = [given.replace('-', ' ')]
result_parts = []
for hyphen_part in hyphen_parts:
parts = hyphen_part.replace('.', ' ').split()
hyphen_result = ''
group = []
for part in parts:
if part[0].isupper():
group.append(part[0])
else:
# don't initialize particles (which aren't capitalized)
hyphen_result += mark.join(group) + mark + ' ' + part + ' '
group = []
hyphen_result += mark.join(group) + mark
# remove double spaces
hyphen_result = ' '.join(hyphen_result.split())
result_parts.append(hyphen_result)
return '-'.join(result_parts)
def markup(self, text):
if text:
return self.wrap(self.format(text))
else:
return None
class Name_Part(CitationStylesElement, Formatted, Affixed, TextCased):
def format_part(self, given, family):
if self.get('name') == 'given':
given = self.wrap(self.format(self.case(given)))
elif self.get('name') == 'family':
family = self.wrap(self.format(self.case(family)))
return given, family
class Et_Al(CitationStylesElement, Formatted, Affixed):
def process(self):
variable = self.get('term', 'et-al')
        term = self.get_term(variable)
return term
def markup(self, text):
if text:
return self.wrap(self.format(text))
else:
return None
class Substitute(CitationStylesElement, Parent):
def render(self, item, context=None, **kwargs):
for child in self.getchildren():
try:
if isinstance(child, Names) and child.name is None:
names = self.xpath_search('./parent::cs:names[1]')[0]
text = child.render(item, names_context=names,
context=context, **kwargs)
else:
text = child.render(item, context=context, **kwargs)
except VariableError:
continue
if text:
self.add_to_repressed_list(child, context)
break
try:
return text
except NameError:
return None
def add_to_repressed_list(self, child, context):
layout = context.get_layout()
tag_list = layout.repressed.get(child.tag, [])
tag_list.append(child.get('variable'))
layout.repressed[child.tag] = tag_list
class Label(CitationStylesElement, Formatted, Affixed, StrippedPeriods,
TextCased):
def calls_variable(self):
return self.get('variable') == 'locator'
def process(self, item, variable=None, plural=None, context=None, **kwargs):
if variable is None:
variable = self.get('variable')
form = self.get('form', 'long')
plural_option = self.get('plural', 'contextual')
if plural is None:
plural = self._is_plural(item)
if variable == 'locator' and item.has_locator:
variable = item.locator.label
if form == 'long':
term = self.get_term(variable)
else:
term = self.get_term(variable, form)
if (plural_option == 'contextual' and plural or
plural_option == 'always'):
text = term.multiple
else:
text = term.single
return text
def markup(self, text):
if text:
return self.wrap(self.format(self.case(self.strip_periods(text))))
else:
return None
RE_MULTIPLE_NUMBERS = re.compile(r'\d+[^\d]+\d+')
def _is_plural(self, item):
variable = self.get('variable')
if variable == 'locator':
value = item.locator.identifier
else:
try:
value = item.reference[variable.replace('-', '_')]
except VariableError:
return False
if variable.startswith('number-of') and int(item[variable]) > 1:
return True
else:
return self.RE_MULTIPLE_NUMBERS.search(str(value)) is not None
class Group(CitationStylesElement, Parent, Formatted, Affixed, Delimited):
def calls_variable(self):
return True
def process(self, item, context=None, **kwargs):
output = []
variable_called = False
variable_rendered = False
for child in self.iterchildren():
variable_called = variable_called or child.calls_variable()
try:
child_text = child.render(item, context=context, **kwargs)
if child_text is not None:
output.append(child_text)
variable_rendered = (variable_rendered or
child.calls_variable())
except VariableError:
pass
output = [item for item in output if item is not None]
        success = not variable_called or variable_rendered
if output and success:
return self.join(output)
else:
raise VariableError
def markup(self, text):
if text:
return self.wrap(self.format(text))
else:
return None
class ConditionFailed(Exception):
pass
class Choose(CitationStylesElement, Parent):
def render(self, item, context=None, **kwargs):
for child in self.getchildren():
try:
return child.render(item, context=context, **kwargs)
except ConditionFailed:
continue
return None
class If(CitationStylesElement, Parent):
def render(self, item, context=None, **kwargs):
# TODO self.get('disambiguate')
results = []
if 'type' in self.attrib:
results += self._type(item)
if 'variable' in self.attrib:
results += self._variable(item)
if 'is-numeric' in self.attrib:
results += self._is_numeric(item)
if 'is-uncertain-date' in self.attrib:
results += self._is_uncertain_date(item)
if 'locator' in self.attrib:
results += self._locator(item)
if 'position' in self.attrib:
results += self._position(item, context)
# TODO: 'match' also applies to individual tests above!
if self.get('match') == 'any':
result = any(results)
elif self.get('match') == 'none':
result = not any(results)
else:
result = all(results)
if not result:
raise ConditionFailed
return self.render_children(item, context=context, **kwargs)
def _type(self, item):
return [typ.lower() == item.reference.type
for typ in self.get('type').split()]
def _variable(self, item):
variables = [var.replace('-', '_')
for var in self.get('variable').split()]
output = []
for variable in variables:
if variable == 'locator':
output.append('locator' in item)
else:
output.append(variable in item.reference)
return output
def _is_numeric(self, item):
numeric_match = Number.re_numeric.match
return [var.replace('-', '_') in item.reference and
numeric_match(str(item.reference[var.replace('-', '_')]))
for var in self.get('is-numeric').split()]
def _is_uncertain_date(self, item):
result = []
for date in self.get('is-uncertain-date').split():
date_variable = date.replace('-', '_')
try:
circa = item.reference[date_variable].get('circa', False)
except VariableError:
circa = False
result.append(circa)
return result
def _locator(self, item):
return [var.replace('-', ' ') == item.locator.label
for var in self.get('locator').split()]
def _position(self, item, context):
if context is None:
context = self
if context.xpath_search('./ancestor::*[self::cs:bibliography]'):
return [False]
# citation node
cites = context.get_layout().getparent().cites
last_cite = cites[-1] if cites else None
already_cited = item.key in (cite.key for cite in cites)
possibly_ibid = (already_cited
and item.key == last_cite.key
and (item.citation is last_cite.citation
or len(last_cite.citation.cites) == 1))
results = []
for position in self.get('position').split():
result = False
if position == 'first':
result = not already_cited
elif position == 'subsequent':
result = already_cited
elif possibly_ibid and position == 'ibid':
result = item.has_locator or not last_cite.has_locator
elif possibly_ibid and position == 'ibid-with-locator':
result = (item.has_locator and not last_cite.has_locator
or (item.has_locator and last_cite.has_locator
and item.locator != last_cite.locator))
elif already_cited and position == 'near-note':
max_distance = self.get_root().get_option('near-note-distance')
citations = 1
last_citation = None
for cite in reversed(cites):
if item.key == cite.key and citations <= max_distance:
result = True
break
elif cite.citation is not last_citation:
citations += 1
last_citation = cite.citation
results.append(result)
return results
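# How the cs:choose 'match' attribute handled above maps onto Python
# builtins (an illustrative note, not part of the original module):
#     results = [True, False]
#     any(results)       # match="any":  at least one test must pass
#     not any(results)   # match="none": every test must fail
#     all(results)       # default "all": every test must pass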
class Else_If(If, CitationStylesElement):
pass
class Else(CitationStylesElement, Parent):
def render(self, item, context=None, **kwargs):
return self.render_children(item, context=context, **kwargs)
# utility functions
def to_ordinal(number, context):
number = str(number)
last_digit = int(number[-1])
if last_digit in (1, 2, 3) and not (len(number) > 1 and number[-2] == '1'):
ordinal_term = 'ordinal-{:02}'.format(last_digit)
else:
ordinal_term = 'ordinal-04'
return number + context.get_term(ordinal_term).single
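# A quick sketch of the suffix rule above (illustrative; get_term() supplies
# the locale's actual suffixes). For English terms this mirrors:
#     1 -> ordinal-01 ('st')    11 -> ordinal-04 ('th', teen exception)
#     2 -> ordinal-02 ('nd')    23 -> ordinal-03 ('rd')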
def romanize(n):
# by Kay Schluehr - from http://billmill.org/python_roman.html
    numerals = (('M', 1000), ('CM', 900), ('D', 500), ('CD', 400),
                ('C', 100), ('XC', 90), ('L', 50), ('XL', 40), ('X', 10),
                ('IX', 9), ('V', 5), ('IV', 4), ('I', 1))
roman = []
for ltr, num in numerals:
(k, n) = divmod(n, num)
roman.append(ltr * k)
return ''.join(roman)
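# Usage sketch for romanize() (illustrative, not part of the original
# module): the numeral pairs are consumed greedily from largest to
# smallest, so compound numerals come out in canonical form.
#     >>> romanize(49)
#     'XLIX'
#     >>> romanize(2024)
#     'MMXXIV'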
| {
"repo_name": "jayvdb/citeproc-py",
"path": "citeproc/model.py",
"copies": "2",
"size": "56622",
"license": "bsd-2-clause",
"hash": -190875467550723460,
"line_mean": 35.3894601542,
"line_max": 81,
"alpha_frac": 0.5151884427,
"autogenerated": false,
"ratio": 4.2980112342492784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5813199676949278,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import OrderedDict
import importlib
import sys
import six
def watermark():
"""
Give the version of each of the dependencies -- useful for bug reports.
Returns
-------
result : dict
mapping the name of each package to its version string or, if an
optional dependency is not installed, None
"""
packages = ['six', 'numpy', 'scipy', 'matplotlib', 'pandas', 'pims',
'metadatastore', 'filestore',
'channelarchiver', 'xray_vision']
result = OrderedDict()
for package_name in packages:
try:
package = importlib.import_module(package_name)
version = package.__version__
        except ImportError:
            result[package_name] = None
            continue  # without this, a stale or unbound 'version' below
                      # would overwrite the None just recorded
        except Exception as err:
            version = "FAILED TO DETECT: {0}".format(err)
        result[package_name] = version
    # the Python interpreter version is recorded as well
version_info = sys.version_info
result['python'] = _make_version_string(version_info)
return result
def _make_version_string(version_info):
version_string = '.'.join(map(str, [version_info[0], version_info[1],
version_info[2]]))
return version_string
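# Usage sketch (hypothetical session; actual values depend on the
# environment, and missing optional dependencies map to None):
#     >>> versions = watermark()
#     >>> versions['python']           # e.g. '2.7.12'
#     >>> versions['channelarchiver']  # None when not installed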
| {
"repo_name": "ericdill/databroker",
"path": "databroker/utils/diagnostics.py",
"copies": "1",
"size": "1336",
"license": "bsd-3-clause",
"hash": 5386953142595245000,
"line_mean": 31.5853658537,
"line_max": 75,
"alpha_frac": 0.6032934132,
"autogenerated": false,
"ratio": 4.409240924092409,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006097560975609756,
"num_lines": 41
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import OrderedDict
import six
BASE_COLORS = {
'b': (0, 0, 1),
'g': (0, 0.5, 0),
'r': (1, 0, 0),
'c': (0, 0.75, 0.75),
'm': (0.75, 0, 0.75),
'y': (0.75, 0.75, 0),
'k': (0, 0, 0),
'w': (1, 1, 1)}
# These colors are from Tableau
TABLEAU_COLORS = (
('blue', '#1f77b4'),
('orange', '#ff7f0e'),
('green', '#2ca02c'),
('red', '#d62728'),
('purple', '#9467bd'),
('brown', '#8c564b'),
('pink', '#e377c2'),
('gray', '#7f7f7f'),
('olive', '#bcbd22'),
('cyan', '#17becf'),
)
# Normalize name to "tab:<name>" to avoid name collisions.
TABLEAU_COLORS = OrderedDict(
('tab:' + name, value) for name, value in TABLEAU_COLORS)
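# After normalization the palette is keyed by the 'tab:' prefix
# (illustrative lookup, not part of the original module):
#     >>> TABLEAU_COLORS['tab:blue']
#     '#1f77b4'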
# This mapping of color names -> hex values is taken from
# a survey run by Randall Munroe; see:
# http://blog.xkcd.com/2010/05/03/color-survey-results/
# for more details. The results are hosted at
# https://xkcd.com/color/rgb.txt
#
# License: http://creativecommons.org/publicdomain/zero/1.0/
XKCD_COLORS = {
'cloudy blue': '#acc2d9',
'dark pastel green': '#56ae57',
'dust': '#b2996e',
'electric lime': '#a8ff04',
'fresh green': '#69d84f',
'light eggplant': '#894585',
'nasty green': '#70b23f',
'really light blue': '#d4ffff',
'tea': '#65ab7c',
'warm purple': '#952e8f',
'yellowish tan': '#fcfc81',
'cement': '#a5a391',
'dark grass green': '#388004',
'dusty teal': '#4c9085',
'grey teal': '#5e9b8a',
'macaroni and cheese': '#efb435',
'pinkish tan': '#d99b82',
'spruce': '#0a5f38',
'strong blue': '#0c06f7',
'toxic green': '#61de2a',
'windows blue': '#3778bf',
'blue blue': '#2242c7',
'blue with a hint of purple': '#533cc6',
'booger': '#9bb53c',
'bright sea green': '#05ffa6',
'dark green blue': '#1f6357',
'deep turquoise': '#017374',
'green teal': '#0cb577',
'strong pink': '#ff0789',
'bland': '#afa88b',
'deep aqua': '#08787f',
'lavender pink': '#dd85d7',
'light moss green': '#a6c875',
'light seafoam green': '#a7ffb5',
'olive yellow': '#c2b709',
'pig pink': '#e78ea5',
'deep lilac': '#966ebd',
'desert': '#ccad60',
'dusty lavender': '#ac86a8',
'purpley grey': '#947e94',
'purply': '#983fb2',
'candy pink': '#ff63e9',
'light pastel green': '#b2fba5',
'boring green': '#63b365',
'kiwi green': '#8ee53f',
'light grey green': '#b7e1a1',
'orange pink': '#ff6f52',
'tea green': '#bdf8a3',
'very light brown': '#d3b683',
'egg shell': '#fffcc4',
'eggplant purple': '#430541',
'powder pink': '#ffb2d0',
'reddish grey': '#997570',
'baby shit brown': '#ad900d',
'liliac': '#c48efd',
'stormy blue': '#507b9c',
'ugly brown': '#7d7103',
'custard': '#fffd78',
'darkish pink': '#da467d',
'deep brown': '#410200',
'greenish beige': '#c9d179',
'manilla': '#fffa86',
'off blue': '#5684ae',
'battleship grey': '#6b7c85',
'browny green': '#6f6c0a',
'bruise': '#7e4071',
'kelley green': '#009337',
'sickly yellow': '#d0e429',
'sunny yellow': '#fff917',
'azul': '#1d5dec',
'darkgreen': '#054907',
'green/yellow': '#b5ce08',
'lichen': '#8fb67b',
'light light green': '#c8ffb0',
'pale gold': '#fdde6c',
'sun yellow': '#ffdf22',
'tan green': '#a9be70',
'burple': '#6832e3',
'butterscotch': '#fdb147',
'toupe': '#c7ac7d',
'dark cream': '#fff39a',
'indian red': '#850e04',
'light lavendar': '#efc0fe',
'poison green': '#40fd14',
'baby puke green': '#b6c406',
'bright yellow green': '#9dff00',
'charcoal grey': '#3c4142',
'squash': '#f2ab15',
'cinnamon': '#ac4f06',
'light pea green': '#c4fe82',
'radioactive green': '#2cfa1f',
'raw sienna': '#9a6200',
'baby purple': '#ca9bf7',
'cocoa': '#875f42',
'light royal blue': '#3a2efe',
'orangeish': '#fd8d49',
'rust brown': '#8b3103',
'sand brown': '#cba560',
'swamp': '#698339',
'tealish green': '#0cdc73',
'burnt siena': '#b75203',
'camo': '#7f8f4e',
'dusk blue': '#26538d',
'fern': '#63a950',
'old rose': '#c87f89',
'pale light green': '#b1fc99',
'peachy pink': '#ff9a8a',
'rosy pink': '#f6688e',
'light bluish green': '#76fda8',
'light bright green': '#53fe5c',
'light neon green': '#4efd54',
'light seafoam': '#a0febf',
'tiffany blue': '#7bf2da',
'washed out green': '#bcf5a6',
'browny orange': '#ca6b02',
'nice blue': '#107ab0',
'sapphire': '#2138ab',
'greyish teal': '#719f91',
'orangey yellow': '#fdb915',
'parchment': '#fefcaf',
'straw': '#fcf679',
'very dark brown': '#1d0200',
'terracota': '#cb6843',
'ugly blue': '#31668a',
'clear blue': '#247afd',
'creme': '#ffffb6',
'foam green': '#90fda9',
'grey/green': '#86a17d',
'light gold': '#fddc5c',
'seafoam blue': '#78d1b6',
'topaz': '#13bbaf',
'violet pink': '#fb5ffc',
'wintergreen': '#20f986',
'yellow tan': '#ffe36e',
'dark fuchsia': '#9d0759',
'indigo blue': '#3a18b1',
'light yellowish green': '#c2ff89',
'pale magenta': '#d767ad',
'rich purple': '#720058',
'sunflower yellow': '#ffda03',
'green/blue': '#01c08d',
'leather': '#ac7434',
'racing green': '#014600',
'vivid purple': '#9900fa',
'dark royal blue': '#02066f',
'hazel': '#8e7618',
'muted pink': '#d1768f',
'booger green': '#96b403',
'canary': '#fdff63',
'cool grey': '#95a3a6',
'dark taupe': '#7f684e',
'darkish purple': '#751973',
'true green': '#089404',
'coral pink': '#ff6163',
'dark sage': '#598556',
'dark slate blue': '#214761',
'flat blue': '#3c73a8',
'mushroom': '#ba9e88',
'rich blue': '#021bf9',
'dirty purple': '#734a65',
'greenblue': '#23c48b',
'icky green': '#8fae22',
'light khaki': '#e6f2a2',
'warm blue': '#4b57db',
'dark hot pink': '#d90166',
'deep sea blue': '#015482',
'carmine': '#9d0216',
'dark yellow green': '#728f02',
'pale peach': '#ffe5ad',
'plum purple': '#4e0550',
'golden rod': '#f9bc08',
'neon red': '#ff073a',
'old pink': '#c77986',
'very pale blue': '#d6fffe',
'blood orange': '#fe4b03',
'grapefruit': '#fd5956',
'sand yellow': '#fce166',
'clay brown': '#b2713d',
'dark blue grey': '#1f3b4d',
'flat green': '#699d4c',
'light green blue': '#56fca2',
'warm pink': '#fb5581',
'dodger blue': '#3e82fc',
'gross green': '#a0bf16',
'ice': '#d6fffa',
'metallic blue': '#4f738e',
'pale salmon': '#ffb19a',
'sap green': '#5c8b15',
'algae': '#54ac68',
'bluey grey': '#89a0b0',
'greeny grey': '#7ea07a',
'highlighter green': '#1bfc06',
'light light blue': '#cafffb',
'light mint': '#b6ffbb',
'raw umber': '#a75e09',
'vivid blue': '#152eff',
'deep lavender': '#8d5eb7',
'dull teal': '#5f9e8f',
'light greenish blue': '#63f7b4',
'mud green': '#606602',
'pinky': '#fc86aa',
'red wine': '#8c0034',
'shit green': '#758000',
'tan brown': '#ab7e4c',
'darkblue': '#030764',
'rosa': '#fe86a4',
'lipstick': '#d5174e',
'pale mauve': '#fed0fc',
'claret': '#680018',
'dandelion': '#fedf08',
'orangered': '#fe420f',
'poop green': '#6f7c00',
'ruby': '#ca0147',
'dark': '#1b2431',
'greenish turquoise': '#00fbb0',
'pastel red': '#db5856',
'piss yellow': '#ddd618',
'bright cyan': '#41fdfe',
'dark coral': '#cf524e',
'algae green': '#21c36f',
'darkish red': '#a90308',
'reddy brown': '#6e1005',
'blush pink': '#fe828c',
'camouflage green': '#4b6113',
'lawn green': '#4da409',
'putty': '#beae8a',
'vibrant blue': '#0339f8',
'dark sand': '#a88f59',
'purple/blue': '#5d21d0',
'saffron': '#feb209',
'twilight': '#4e518b',
'warm brown': '#964e02',
'bluegrey': '#85a3b2',
'bubble gum pink': '#ff69af',
'duck egg blue': '#c3fbf4',
'greenish cyan': '#2afeb7',
'petrol': '#005f6a',
'royal': '#0c1793',
'butter': '#ffff81',
'dusty orange': '#f0833a',
'off yellow': '#f1f33f',
'pale olive green': '#b1d27b',
'orangish': '#fc824a',
'leaf': '#71aa34',
'light blue grey': '#b7c9e2',
'dried blood': '#4b0101',
'lightish purple': '#a552e6',
'rusty red': '#af2f0d',
'lavender blue': '#8b88f8',
'light grass green': '#9af764',
'light mint green': '#a6fbb2',
'sunflower': '#ffc512',
'velvet': '#750851',
'brick orange': '#c14a09',
'lightish red': '#fe2f4a',
'pure blue': '#0203e2',
'twilight blue': '#0a437a',
'violet red': '#a50055',
'yellowy brown': '#ae8b0c',
'carnation': '#fd798f',
'muddy yellow': '#bfac05',
'dark seafoam green': '#3eaf76',
'deep rose': '#c74767',
'dusty red': '#b9484e',
'grey/blue': '#647d8e',
'lemon lime': '#bffe28',
'purple/pink': '#d725de',
'brown yellow': '#b29705',
'purple brown': '#673a3f',
'wisteria': '#a87dc2',
'banana yellow': '#fafe4b',
'lipstick red': '#c0022f',
'water blue': '#0e87cc',
'brown grey': '#8d8468',
'vibrant purple': '#ad03de',
'baby green': '#8cff9e',
'barf green': '#94ac02',
'eggshell blue': '#c4fff7',
'sandy yellow': '#fdee73',
'cool green': '#33b864',
'pale': '#fff9d0',
'blue/grey': '#758da3',
'hot magenta': '#f504c9',
'greyblue': '#77a1b5',
'purpley': '#8756e4',
'baby shit green': '#889717',
'brownish pink': '#c27e79',
'dark aquamarine': '#017371',
'diarrhea': '#9f8303',
'light mustard': '#f7d560',
'pale sky blue': '#bdf6fe',
'turtle green': '#75b84f',
'bright olive': '#9cbb04',
'dark grey blue': '#29465b',
'greeny brown': '#696006',
'lemon green': '#adf802',
'light periwinkle': '#c1c6fc',
'seaweed green': '#35ad6b',
'sunshine yellow': '#fffd37',
'ugly purple': '#a442a0',
'medium pink': '#f36196',
'puke brown': '#947706',
'very light pink': '#fff4f2',
'viridian': '#1e9167',
'bile': '#b5c306',
'faded yellow': '#feff7f',
'very pale green': '#cffdbc',
'vibrant green': '#0add08',
'bright lime': '#87fd05',
'spearmint': '#1ef876',
'light aquamarine': '#7bfdc7',
'light sage': '#bcecac',
'yellowgreen': '#bbf90f',
'baby poo': '#ab9004',
'dark seafoam': '#1fb57a',
'deep teal': '#00555a',
'heather': '#a484ac',
'rust orange': '#c45508',
'dirty blue': '#3f829d',
'fern green': '#548d44',
'bright lilac': '#c95efb',
'weird green': '#3ae57f',
'peacock blue': '#016795',
'avocado green': '#87a922',
'faded orange': '#f0944d',
'grape purple': '#5d1451',
'hot green': '#25ff29',
'lime yellow': '#d0fe1d',
'mango': '#ffa62b',
'shamrock': '#01b44c',
'bubblegum': '#ff6cb5',
'purplish brown': '#6b4247',
'vomit yellow': '#c7c10c',
'pale cyan': '#b7fffa',
'key lime': '#aeff6e',
'tomato red': '#ec2d01',
'lightgreen': '#76ff7b',
'merlot': '#730039',
'night blue': '#040348',
'purpleish pink': '#df4ec8',
'apple': '#6ecb3c',
'baby poop green': '#8f9805',
'green apple': '#5edc1f',
'heliotrope': '#d94ff5',
'yellow/green': '#c8fd3d',
'almost black': '#070d0d',
'cool blue': '#4984b8',
'leafy green': '#51b73b',
'mustard brown': '#ac7e04',
'dusk': '#4e5481',
'dull brown': '#876e4b',
'frog green': '#58bc08',
'vivid green': '#2fef10',
'bright light green': '#2dfe54',
'fluro green': '#0aff02',
'kiwi': '#9cef43',
'seaweed': '#18d17b',
'navy green': '#35530a',
'ultramarine blue': '#1805db',
'iris': '#6258c4',
'pastel orange': '#ff964f',
'yellowish orange': '#ffab0f',
'perrywinkle': '#8f8ce7',
'tealish': '#24bca8',
'dark plum': '#3f012c',
'pear': '#cbf85f',
'pinkish orange': '#ff724c',
'midnight purple': '#280137',
'light urple': '#b36ff6',
'dark mint': '#48c072',
'greenish tan': '#bccb7a',
'light burgundy': '#a8415b',
'turquoise blue': '#06b1c4',
'ugly pink': '#cd7584',
'sandy': '#f1da7a',
'electric pink': '#ff0490',
'muted purple': '#805b87',
'mid green': '#50a747',
'greyish': '#a8a495',
'neon yellow': '#cfff04',
'banana': '#ffff7e',
'carnation pink': '#ff7fa7',
'tomato': '#ef4026',
'sea': '#3c9992',
'muddy brown': '#886806',
'turquoise green': '#04f489',
'buff': '#fef69e',
'fawn': '#cfaf7b',
'muted blue': '#3b719f',
'pale rose': '#fdc1c5',
'dark mint green': '#20c073',
'amethyst': '#9b5fc0',
'blue/green': '#0f9b8e',
'chestnut': '#742802',
'sick green': '#9db92c',
'pea': '#a4bf20',
'rusty orange': '#cd5909',
'stone': '#ada587',
'rose red': '#be013c',
'pale aqua': '#b8ffeb',
'deep orange': '#dc4d01',
'earth': '#a2653e',
'mossy green': '#638b27',
'grassy green': '#419c03',
'pale lime green': '#b1ff65',
'light grey blue': '#9dbcd4',
'pale grey': '#fdfdfe',
'asparagus': '#77ab56',
'blueberry': '#464196',
'purple red': '#990147',
'pale lime': '#befd73',
'greenish teal': '#32bf84',
'caramel': '#af6f09',
'deep magenta': '#a0025c',
'light peach': '#ffd8b1',
'milk chocolate': '#7f4e1e',
'ocher': '#bf9b0c',
'off green': '#6ba353',
'purply pink': '#f075e6',
'lightblue': '#7bc8f6',
'dusky blue': '#475f94',
'golden': '#f5bf03',
'light beige': '#fffeb6',
'butter yellow': '#fffd74',
'dusky purple': '#895b7b',
'french blue': '#436bad',
'ugly yellow': '#d0c101',
'greeny yellow': '#c6f808',
'orangish red': '#f43605',
'shamrock green': '#02c14d',
'orangish brown': '#b25f03',
'tree green': '#2a7e19',
'deep violet': '#490648',
'gunmetal': '#536267',
'blue/purple': '#5a06ef',
'cherry': '#cf0234',
'sandy brown': '#c4a661',
'warm grey': '#978a84',
'dark indigo': '#1f0954',
'midnight': '#03012d',
'bluey green': '#2bb179',
'grey pink': '#c3909b',
'soft purple': '#a66fb5',
'blood': '#770001',
'brown red': '#922b05',
'medium grey': '#7d7f7c',
'berry': '#990f4b',
'poo': '#8f7303',
'purpley pink': '#c83cb9',
'light salmon': '#fea993',
'snot': '#acbb0d',
'easter purple': '#c071fe',
'light yellow green': '#ccfd7f',
'dark navy blue': '#00022e',
'drab': '#828344',
'light rose': '#ffc5cb',
'rouge': '#ab1239',
'purplish red': '#b0054b',
'slime green': '#99cc04',
'baby poop': '#937c00',
'irish green': '#019529',
'pink/purple': '#ef1de7',
'dark navy': '#000435',
'greeny blue': '#42b395',
'light plum': '#9d5783',
'pinkish grey': '#c8aca9',
'dirty orange': '#c87606',
'rust red': '#aa2704',
'pale lilac': '#e4cbff',
'orangey red': '#fa4224',
'primary blue': '#0804f9',
'kermit green': '#5cb200',
'brownish purple': '#76424e',
'murky green': '#6c7a0e',
'wheat': '#fbdd7e',
'very dark purple': '#2a0134',
'bottle green': '#044a05',
'watermelon': '#fd4659',
'deep sky blue': '#0d75f8',
'fire engine red': '#fe0002',
'yellow ochre': '#cb9d06',
'pumpkin orange': '#fb7d07',
'pale olive': '#b9cc81',
'light lilac': '#edc8ff',
'lightish green': '#61e160',
'carolina blue': '#8ab8fe',
'mulberry': '#920a4e',
'shocking pink': '#fe02a2',
'auburn': '#9a3001',
'bright lime green': '#65fe08',
'celadon': '#befdb7',
'pinkish brown': '#b17261',
'poo brown': '#885f01',
'bright sky blue': '#02ccfe',
'celery': '#c1fd95',
'dirt brown': '#836539',
'strawberry': '#fb2943',
'dark lime': '#84b701',
'copper': '#b66325',
'medium brown': '#7f5112',
'muted green': '#5fa052',
"robin's egg": '#6dedfd',
'bright aqua': '#0bf9ea',
'bright lavender': '#c760ff',
'ivory': '#ffffcb',
'very light purple': '#f6cefc',
'light navy': '#155084',
'pink red': '#f5054f',
'olive brown': '#645403',
'poop brown': '#7a5901',
'mustard green': '#a8b504',
'ocean green': '#3d9973',
'very dark blue': '#000133',
'dusty green': '#76a973',
'light navy blue': '#2e5a88',
'minty green': '#0bf77d',
'adobe': '#bd6c48',
'barney': '#ac1db8',
'jade green': '#2baf6a',
'bright light blue': '#26f7fd',
'light lime': '#aefd6c',
'dark khaki': '#9b8f55',
'orange yellow': '#ffad01',
'ocre': '#c69c04',
'maize': '#f4d054',
'faded pink': '#de9dac',
'british racing green': '#05480d',
'sandstone': '#c9ae74',
'mud brown': '#60460f',
'light sea green': '#98f6b0',
'robin egg blue': '#8af1fe',
'aqua marine': '#2ee8bb',
'dark sea green': '#11875d',
'soft pink': '#fdb0c0',
'orangey brown': '#b16002',
'cherry red': '#f7022a',
'burnt yellow': '#d5ab09',
'brownish grey': '#86775f',
'camel': '#c69f59',
'purplish grey': '#7a687f',
'marine': '#042e60',
'greyish pink': '#c88d94',
'pale turquoise': '#a5fbd5',
'pastel yellow': '#fffe71',
'bluey purple': '#6241c7',
'canary yellow': '#fffe40',
'faded red': '#d3494e',
'sepia': '#985e2b',
'coffee': '#a6814c',
'bright magenta': '#ff08e8',
'mocha': '#9d7651',
'ecru': '#feffca',
'purpleish': '#98568d',
'cranberry': '#9e003a',
'darkish green': '#287c37',
'brown orange': '#b96902',
'dusky rose': '#ba6873',
'melon': '#ff7855',
'sickly green': '#94b21c',
'silver': '#c5c9c7',
'purply blue': '#661aee',
'purpleish blue': '#6140ef',
'hospital green': '#9be5aa',
'shit brown': '#7b5804',
'mid blue': '#276ab3',
'amber': '#feb308',
'easter green': '#8cfd7e',
'soft blue': '#6488ea',
'cerulean blue': '#056eee',
'golden brown': '#b27a01',
'bright turquoise': '#0ffef9',
'red pink': '#fa2a55',
'red purple': '#820747',
'greyish brown': '#7a6a4f',
'vermillion': '#f4320c',
'russet': '#a13905',
'steel grey': '#6f828a',
'lighter purple': '#a55af4',
'bright violet': '#ad0afd',
'prussian blue': '#004577',
'slate green': '#658d6d',
'dirty pink': '#ca7b80',
'dark blue green': '#005249',
'pine': '#2b5d34',
'yellowy green': '#bff128',
'dark gold': '#b59410',
'bluish': '#2976bb',
'darkish blue': '#014182',
'dull red': '#bb3f3f',
'pinky red': '#fc2647',
'bronze': '#a87900',
'pale teal': '#82cbb2',
'military green': '#667c3e',
'barbie pink': '#fe46a5',
'bubblegum pink': '#fe83cc',
'pea soup green': '#94a617',
'dark mustard': '#a88905',
'shit': '#7f5f00',
'medium purple': '#9e43a2',
'very dark green': '#062e03',
'dirt': '#8a6e45',
'dusky pink': '#cc7a8b',
'red violet': '#9e0168',
'lemon yellow': '#fdff38',
'pistachio': '#c0fa8b',
'dull yellow': '#eedc5b',
'dark lime green': '#7ebd01',
'denim blue': '#3b5b92',
'teal blue': '#01889f',
'lightish blue': '#3d7afd',
'purpley blue': '#5f34e7',
'light indigo': '#6d5acf',
'swamp green': '#748500',
'brown green': '#706c11',
'dark maroon': '#3c0008',
'hot purple': '#cb00f5',
'dark forest green': '#002d04',
'faded blue': '#658cbb',
'drab green': '#749551',
'light lime green': '#b9ff66',
'snot green': '#9dc100',
'yellowish': '#faee66',
'light blue green': '#7efbb3',
'bordeaux': '#7b002c',
'light mauve': '#c292a1',
'ocean': '#017b92',
'marigold': '#fcc006',
'muddy green': '#657432',
'dull orange': '#d8863b',
'steel': '#738595',
'electric purple': '#aa23ff',
'fluorescent green': '#08ff08',
'yellowish brown': '#9b7a01',
'blush': '#f29e8e',
'soft green': '#6fc276',
'bright orange': '#ff5b00',
'lemon': '#fdff52',
'purple grey': '#866f85',
'acid green': '#8ffe09',
'pale lavender': '#eecffe',
'violet blue': '#510ac9',
'light forest green': '#4f9153',
'burnt red': '#9f2305',
'khaki green': '#728639',
'cerise': '#de0c62',
'faded purple': '#916e99',
'apricot': '#ffb16d',
'dark olive green': '#3c4d03',
'grey brown': '#7f7053',
'green grey': '#77926f',
'true blue': '#010fcc',
'pale violet': '#ceaefa',
'periwinkle blue': '#8f99fb',
'light sky blue': '#c6fcff',
'blurple': '#5539cc',
'green brown': '#544e03',
'bluegreen': '#017a79',
'bright teal': '#01f9c6',
'brownish yellow': '#c9b003',
'pea soup': '#929901',
'forest': '#0b5509',
'barney purple': '#a00498',
'ultramarine': '#2000b1',
'purplish': '#94568c',
'puke yellow': '#c2be0e',
'bluish grey': '#748b97',
'dark periwinkle': '#665fd1',
'dark lilac': '#9c6da5',
'reddish': '#c44240',
'light maroon': '#a24857',
'dusty purple': '#825f87',
'terra cotta': '#c9643b',
'avocado': '#90b134',
'marine blue': '#01386a',
'teal green': '#25a36f',
'slate grey': '#59656d',
'lighter green': '#75fd63',
'electric green': '#21fc0d',
'dusty blue': '#5a86ad',
'golden yellow': '#fec615',
'bright yellow': '#fffd01',
'light lavender': '#dfc5fe',
'umber': '#b26400',
'poop': '#7f5e00',
'dark peach': '#de7e5d',
'jungle green': '#048243',
'eggshell': '#ffffd4',
'denim': '#3b638c',
'yellow brown': '#b79400',
'dull purple': '#84597e',
'chocolate brown': '#411900',
'wine red': '#7b0323',
'neon blue': '#04d9ff',
'dirty green': '#667e2c',
'light tan': '#fbeeac',
'ice blue': '#d7fffe',
'cadet blue': '#4e7496',
'dark mauve': '#874c62',
'very light blue': '#d5ffff',
'grey purple': '#826d8c',
'pastel pink': '#ffbacd',
'very light green': '#d1ffbd',
'dark sky blue': '#448ee4',
'evergreen': '#05472a',
'dull pink': '#d5869d',
'aubergine': '#3d0734',
'mahogany': '#4a0100',
'reddish orange': '#f8481c',
'deep green': '#02590f',
'vomit green': '#89a203',
'purple pink': '#e03fd8',
'dusty pink': '#d58a94',
'faded green': '#7bb274',
'camo green': '#526525',
'pinky purple': '#c94cbe',
'pink purple': '#db4bda',
'brownish red': '#9e3623',
'dark rose': '#b5485d',
'mud': '#735c12',
'brownish': '#9c6d57',
'emerald green': '#028f1e',
'pale brown': '#b1916e',
'dull blue': '#49759c',
'burnt umber': '#a0450e',
'medium green': '#39ad48',
'clay': '#b66a50',
'light aqua': '#8cffdb',
'light olive green': '#a4be5c',
'brownish orange': '#cb7723',
'dark aqua': '#05696b',
'purplish pink': '#ce5dae',
'dark salmon': '#c85a53',
'greenish grey': '#96ae8d',
'jade': '#1fa774',
'ugly green': '#7a9703',
'dark beige': '#ac9362',
'emerald': '#01a049',
'pale red': '#d9544d',
'light magenta': '#fa5ff7',
'sky': '#82cafc',
'light cyan': '#acfffc',
'yellow orange': '#fcb001',
'reddish purple': '#910951',
'reddish pink': '#fe2c54',
'orchid': '#c875c4',
'dirty yellow': '#cdc50a',
'orange red': '#fd411e',
'deep red': '#9a0200',
'orange brown': '#be6400',
'cobalt blue': '#030aa7',
'neon pink': '#fe019a',
'rose pink': '#f7879a',
'greyish purple': '#887191',
'raspberry': '#b00149',
'aqua green': '#12e193',
'salmon pink': '#fe7b7c',
'tangerine': '#ff9408',
'brownish green': '#6a6e09',
'red brown': '#8b2e16',
'greenish brown': '#696112',
'pumpkin': '#e17701',
'pine green': '#0a481e',
'charcoal': '#343837',
'baby pink': '#ffb7ce',
'cornflower': '#6a79f7',
'blue violet': '#5d06e9',
'chocolate': '#3d1c02',
'greyish green': '#82a67d',
'scarlet': '#be0119',
'green yellow': '#c9ff27',
'dark olive': '#373e02',
'sienna': '#a9561e',
'pastel purple': '#caa0ff',
'terracotta': '#ca6641',
'aqua blue': '#02d8e9',
'sage green': '#88b378',
'blood red': '#980002',
'deep pink': '#cb0162',
'grass': '#5cac2d',
'moss': '#769958',
'pastel blue': '#a2bffe',
'bluish green': '#10a674',
'green blue': '#06b48b',
'dark tan': '#af884a',
'greenish blue': '#0b8b87',
'pale orange': '#ffa756',
'vomit': '#a2a415',
'forrest green': '#154406',
'dark lavender': '#856798',
'dark violet': '#34013f',
'purple blue': '#632de9',
'dark cyan': '#0a888a',
'olive drab': '#6f7632',
'pinkish': '#d46a7e',
'cobalt': '#1e488f',
'neon purple': '#bc13fe',
'light turquoise': '#7ef4cc',
'apple green': '#76cd26',
'dull green': '#74a662',
'wine': '#80013f',
'powder blue': '#b1d1fc',
'off white': '#ffffe4',
'electric blue': '#0652ff',
'dark turquoise': '#045c5a',
'blue purple': '#5729ce',
'azure': '#069af3',
'bright red': '#ff000d',
'pinkish red': '#f10c45',
'cornflower blue': '#5170d7',
'light olive': '#acbf69',
'grape': '#6c3461',
'greyish blue': '#5e819d',
'purplish blue': '#601ef9',
'yellowish green': '#b0dd16',
'greenish yellow': '#cdfd02',
'medium blue': '#2c6fbb',
'dusty rose': '#c0737a',
'light violet': '#d6b4fc',
'midnight blue': '#020035',
'bluish purple': '#703be7',
'red orange': '#fd3c06',
'dark magenta': '#960056',
'greenish': '#40a368',
'ocean blue': '#03719c',
'coral': '#fc5a50',
'cream': '#ffffc2',
'reddish brown': '#7f2b0a',
'burnt sienna': '#b04e0f',
'brick': '#a03623',
'sage': '#87ae73',
'grey green': '#789b73',
'white': '#ffffff',
"robin's egg blue": '#98eff9',
'moss green': '#658b38',
'steel blue': '#5a7d9a',
'eggplant': '#380835',
'light yellow': '#fffe7a',
'leaf green': '#5ca904',
'light grey': '#d8dcd6',
'puke': '#a5a502',
'pinkish purple': '#d648d7',
'sea blue': '#047495',
'pale purple': '#b790d4',
'slate blue': '#5b7c99',
'blue grey': '#607c8e',
'hunter green': '#0b4008',
'fuchsia': '#ed0dd9',
'crimson': '#8c000f',
'pale yellow': '#ffff84',
'ochre': '#bf9005',
'mustard yellow': '#d2bd0a',
'light red': '#ff474c',
'cerulean': '#0485d1',
'pale pink': '#ffcfdc',
'deep blue': '#040273',
'rust': '#a83c09',
'light teal': '#90e4c1',
'slate': '#516572',
'goldenrod': '#fac205',
'dark yellow': '#d5b60a',
'dark grey': '#363737',
'army green': '#4b5d16',
'grey blue': '#6b8ba4',
'seafoam': '#80f9ad',
'puce': '#a57e52',
'spring green': '#a9f971',
'dark orange': '#c65102',
'sand': '#e2ca76',
'pastel green': '#b0ff9d',
'mint': '#9ffeb0',
'light orange': '#fdaa48',
'bright pink': '#fe01b1',
'chartreuse': '#c1f80a',
'deep purple': '#36013f',
'dark brown': '#341c02',
'taupe': '#b9a281',
'pea green': '#8eab12',
'puke green': '#9aae07',
'kelly green': '#02ab2e',
'seafoam green': '#7af9ab',
'blue green': '#137e6d',
'khaki': '#aaa662',
'burgundy': '#610023',
'dark teal': '#014d4e',
'brick red': '#8f1402',
'royal purple': '#4b006e',
'plum': '#580f41',
'mint green': '#8fff9f',
'gold': '#dbb40c',
'baby blue': '#a2cffe',
'yellow green': '#c0fb2d',
'bright purple': '#be03fd',
'dark red': '#840000',
'pale blue': '#d0fefe',
'grass green': '#3f9b0b',
'navy': '#01153e',
'aquamarine': '#04d8b2',
'burnt orange': '#c04e01',
'neon green': '#0cff0c',
'bright blue': '#0165fc',
'rose': '#cf6275',
'light pink': '#ffd1df',
'mustard': '#ceb301',
'indigo': '#380282',
'lime': '#aaff32',
'sea green': '#53fca1',
'periwinkle': '#8e82fe',
'dark pink': '#cb416b',
'olive green': '#677a04',
'peach': '#ffb07c',
'pale green': '#c7fdb5',
'light brown': '#ad8150',
'hot pink': '#ff028d',
'black': '#000000',
'lilac': '#cea2fd',
'navy blue': '#001146',
'royal blue': '#0504aa',
'beige': '#e6daa6',
'salmon': '#ff796c',
'olive': '#6e750e',
'maroon': '#650021',
'bright green': '#01ff07',
'dark purple': '#35063e',
'mauve': '#ae7181',
'forest green': '#06470c',
'aqua': '#13eac9',
'cyan': '#00ffff',
'tan': '#d1b26f',
'dark blue': '#00035b',
'lavender': '#c79fef',
'turquoise': '#06c2ac',
'dark green': '#033500',
'violet': '#9a0eea',
'light purple': '#bf77f6',
'lime green': '#89fe05',
'grey': '#929591',
'sky blue': '#75bbfd',
'yellow': '#ffff14',
'magenta': '#c20078',
'light green': '#96f97b',
'orange': '#f97306',
'teal': '#029386',
'light blue': '#95d0fc',
'red': '#e50000',
'brown': '#653700',
'pink': '#ff81c0',
'blue': '#0343df',
'green': '#15b01a',
'purple': '#7e1e9c'}
# Normalize name to "xkcd:<name>" to avoid name collisions.
XKCD_COLORS = {'xkcd:' + name: value for name, value in XKCD_COLORS.items()}
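# As above, lookups now require the 'xkcd:' prefix (illustrative):
#     >>> XKCD_COLORS['xkcd:sky blue']
#     '#75bbfd'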
# https://drafts.csswg.org/css-color-4/#named-colors
CSS4_COLORS = {
'aliceblue': '#F0F8FF',
'antiquewhite': '#FAEBD7',
'aqua': '#00FFFF',
'aquamarine': '#7FFFD4',
'azure': '#F0FFFF',
'beige': '#F5F5DC',
'bisque': '#FFE4C4',
'black': '#000000',
'blanchedalmond': '#FFEBCD',
'blue': '#0000FF',
'blueviolet': '#8A2BE2',
'brown': '#A52A2A',
'burlywood': '#DEB887',
'cadetblue': '#5F9EA0',
'chartreuse': '#7FFF00',
'chocolate': '#D2691E',
'coral': '#FF7F50',
'cornflowerblue': '#6495ED',
'cornsilk': '#FFF8DC',
'crimson': '#DC143C',
'cyan': '#00FFFF',
'darkblue': '#00008B',
'darkcyan': '#008B8B',
'darkgoldenrod': '#B8860B',
'darkgray': '#A9A9A9',
'darkgreen': '#006400',
'darkgrey': '#A9A9A9',
'darkkhaki': '#BDB76B',
'darkmagenta': '#8B008B',
'darkolivegreen': '#556B2F',
'darkorange': '#FF8C00',
'darkorchid': '#9932CC',
'darkred': '#8B0000',
'darksalmon': '#E9967A',
'darkseagreen': '#8FBC8F',
'darkslateblue': '#483D8B',
'darkslategray': '#2F4F4F',
'darkslategrey': '#2F4F4F',
'darkturquoise': '#00CED1',
'darkviolet': '#9400D3',
'deeppink': '#FF1493',
'deepskyblue': '#00BFFF',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1E90FF',
'firebrick': '#B22222',
'floralwhite': '#FFFAF0',
'forestgreen': '#228B22',
'fuchsia': '#FF00FF',
'gainsboro': '#DCDCDC',
'ghostwhite': '#F8F8FF',
'gold': '#FFD700',
'goldenrod': '#DAA520',
'gray': '#808080',
'green': '#008000',
'greenyellow': '#ADFF2F',
'grey': '#808080',
'honeydew': '#F0FFF0',
'hotpink': '#FF69B4',
'indianred': '#CD5C5C',
'indigo': '#4B0082',
'ivory': '#FFFFF0',
'khaki': '#F0E68C',
'lavender': '#E6E6FA',
'lavenderblush': '#FFF0F5',
'lawngreen': '#7CFC00',
'lemonchiffon': '#FFFACD',
'lightblue': '#ADD8E6',
'lightcoral': '#F08080',
'lightcyan': '#E0FFFF',
'lightgoldenrodyellow': '#FAFAD2',
'lightgray': '#D3D3D3',
'lightgreen': '#90EE90',
'lightgrey': '#D3D3D3',
'lightpink': '#FFB6C1',
'lightsalmon': '#FFA07A',
'lightseagreen': '#20B2AA',
'lightskyblue': '#87CEFA',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#B0C4DE',
'lightyellow': '#FFFFE0',
'lime': '#00FF00',
'limegreen': '#32CD32',
'linen': '#FAF0E6',
'magenta': '#FF00FF',
'maroon': '#800000',
'mediumaquamarine': '#66CDAA',
'mediumblue': '#0000CD',
'mediumorchid': '#BA55D3',
'mediumpurple': '#9370DB',
'mediumseagreen': '#3CB371',
'mediumslateblue': '#7B68EE',
'mediumspringgreen': '#00FA9A',
'mediumturquoise': '#48D1CC',
'mediumvioletred': '#C71585',
'midnightblue': '#191970',
'mintcream': '#F5FFFA',
'mistyrose': '#FFE4E1',
'moccasin': '#FFE4B5',
'navajowhite': '#FFDEAD',
'navy': '#000080',
'oldlace': '#FDF5E6',
'olive': '#808000',
'olivedrab': '#6B8E23',
'orange': '#FFA500',
'orangered': '#FF4500',
'orchid': '#DA70D6',
'palegoldenrod': '#EEE8AA',
'palegreen': '#98FB98',
'paleturquoise': '#AFEEEE',
'palevioletred': '#DB7093',
'papayawhip': '#FFEFD5',
'peachpuff': '#FFDAB9',
'peru': '#CD853F',
'pink': '#FFC0CB',
'plum': '#DDA0DD',
'powderblue': '#B0E0E6',
'purple': '#800080',
'rebeccapurple': '#663399',
'red': '#FF0000',
'rosybrown': '#BC8F8F',
'royalblue': '#4169E1',
'saddlebrown': '#8B4513',
'salmon': '#FA8072',
'sandybrown': '#F4A460',
'seagreen': '#2E8B57',
'seashell': '#FFF5EE',
'sienna': '#A0522D',
'silver': '#C0C0C0',
'skyblue': '#87CEEB',
'slateblue': '#6A5ACD',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#FFFAFA',
'springgreen': '#00FF7F',
'steelblue': '#4682B4',
'tan': '#D2B48C',
'teal': '#008080',
'thistle': '#D8BFD8',
'tomato': '#FF6347',
'turquoise': '#40E0D0',
'violet': '#EE82EE',
'wheat': '#F5DEB3',
'white': '#FFFFFF',
'whitesmoke': '#F5F5F5',
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
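# A minimal lookup sketch across the tables defined in this module
# (illustrative only; matplotlib's own color resolution involves more
# machinery than this):
#     def lookup(name):  # hypothetical helper
#         for table in (BASE_COLORS, TABLEAU_COLORS, XKCD_COLORS,
#                       CSS4_COLORS):
#             if name in table:
#                 return table[name]
#         raise ValueError('unknown color name: %r' % (name,))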
| {
"repo_name": "ammarkhann/FinalSeniorCode",
"path": "lib/python2.7/site-packages/matplotlib/_color_data.py",
"copies": "12",
"size": "34896",
"license": "mit",
"hash": 7722995428267179000,
"line_mean": 29.4237140366,
"line_max": 76,
"alpha_frac": 0.5059892251,
"autogenerated": false,
"ratio": 2.604373460706023,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 1147
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .colors import palettes
import itertools
SHAPES = [
'o',#circle
'^',#triangle up
'D',#diamond
'v',#triangle down
# '+',#plus # doesn't render in legend for some reason
# 'x',#x # also doesn't render in legend for some reason
's',#square
'*',#star
'p',#pentagon
    '8',#octagon
    "_",#hline
    "|",#vline
]
def shape_gen():
while True:
for shape in SHAPES:
yield shape
def size_gen(uniq_values):
n = len(uniq_values)
low = 10
for i in range(low, low + n*10, 10):
yield i
def color_gen(n_colors, colors=None):
if colors:
pal = colors
else:
pal = palettes.color_palette(name="husl", n_colors=n_colors)
generator = itertools.cycle(pal)
while True:
yield next(generator)
def identity_gen(uniq_values):
for value in uniq_values:
yield value
# Matplotlib is not consistent: sometimes it does not
# accept abbreviations
# LINETYPES = [
# '-', #solid
# '--', #dashed
# '-.', #dash-dot
# ':', #dotted
# '.', #point
# '|', #vline
# '_', #hline
# ]
LINETYPES = [
'solid',
'dashed',
'dashdot',
'dotted'
]
def linetype_gen():
while True:
for line in LINETYPES:
yield line
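# A minimal sketch of how these generators are consumed (illustrative
# only, not part of the original module): shape_gen() and linetype_gen()
# cycle forever, so callers slice or zip them against discrete values.
#     >>> import itertools
#     >>> list(itertools.islice(shape_gen(), 3))
#     ['o', '^', 'D']
#     >>> list(size_gen(['a', 'b', 'c']))
#     [10, 20, 30]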
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/discretemappers.py",
"copies": "1",
"size": "1404",
"license": "bsd-2-clause",
"hash": -9179615389275454000,
"line_mean": 19.347826087,
"line_max": 68,
"alpha_frac": 0.5512820513,
"autogenerated": false,
"ratio": 3.2128146453089244,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9152611446532986,
"avg_score": 0.022297050015187826,
"num_lines": 69
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
from ggplot.components import aes
from pandas import DataFrame
__ALL__ = ["geom"]
class geom(object):
"""Base class of all Geoms"""
VALID_AES = []
data = None
aes = None
def __init__(self, *args, **kwargs):
# new dict for each geom
self.aes = {}
for arg in args:
if isinstance(arg, aes):
for k, v in arg.items():
if k in self.VALID_AES:
self.aes[k] = v
elif isinstance(arg, DataFrame):
self.data = arg
else:
raise Exception('Unknown argument of type "{0}".'.format(type(arg)))
if "data" in kwargs:
self.data = kwargs.pop("data")
if "mapping" in kwargs:
for k, v in kwargs.pop("mapping").items():
if k in self.VALID_AES:
self.aes[k] = v
if "colour" in kwargs:
kwargs["color"] = kwargs["colour"]
del kwargs["colour"]
self.manual_aes = {}
for k, v in kwargs.items():
if k in self.VALID_AES:
self.manual_aes[k] = v
def __radd__(self, gg):
gg = deepcopy(gg)
gg.geoms.append(self)
return gg
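# Why __radd__ (an explanatory sketch, not part of the original module):
# in 'ggplot(...) + geom_xxx()' the ggplot object is the left operand, so
# Python falls back to the geom's __radd__, which appends the geom to a
# deep copy of the plot and leaves the original untouched:
#     p = ggplot(aes(x='a', y='b'), data=df)  # hypothetical plot object
#     p2 = p + geom_point()                   # p.geoms is unchanged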
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/geom.py",
"copies": "1",
"size": "1377",
"license": "bsd-2-clause",
"hash": 3150812113927458300,
"line_mean": 29.6,
"line_max": 84,
"alpha_frac": 0.498184459,
"autogenerated": false,
"ratio": 4.026315789473684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9903816365164263,
"avg_score": 0.024136776661884265,
"num_lines": 45
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
from .ggplot import ggplot
class ggtitle(object):
"""
Add a title to your plot
Parameters
----------
title:
Your plot title
Examples
--------
    >>> ggplot(mpg, aes(x='hwy')) + geom_histogram() + ggtitle("MPG Plot")
"""
def __init__(self, title):
if title is None:
raise Exception("No title specified!")
self.title = title
def __radd__(self, gg):
if isinstance(gg, ggplot):
gg = deepcopy(gg)
gg.title = self.title
return gg
else:
return self
class xlim(object):
"""
Set upper and lower limits for your x axis
Parameters
----------
lower_limit:
lower limit for axis
upper_limit:
upper limit for axis
Examples
--------
    >>> ggplot(mpg, aes(x='hwy')) + geom_histogram() + xlim(0, 20)
"""
    def __init__(self, low=None, high=None):
        if low is not None:
            try:
                _ = low - 0
            except TypeError:
                raise Exception("The 'low' argument to %s must be of a "
                                "numeric type or None"
                                % self.__class__.__name__)
        if high is not None:
            try:
                _ = high - 0
            except TypeError:
                raise Exception("The 'high' argument to %s must be of a "
                                "numeric type or None"
                                % self.__class__.__name__)
        self.low, self.high = low, high
def __radd__(self, gg):
gg = deepcopy(gg)
gg.xlimits = [self.low, self.high]
return gg
class ylim(object):
"""
Set upper and lower limits for your y axis
Parameters
----------
lower_limit:
lower limit for axis
upper_limit:
upper limit for axis
Examples
--------
    >>> ggplot(mpg, aes(x='hwy')) + geom_histogram() + ylim(0, 5)
"""
    def __init__(self, low=None, high=None):
        if low is not None:
            try:
                _ = low - 0
            except TypeError:
                raise Exception("The 'low' argument to %s must be of a "
                                "numeric type or None"
                                % self.__class__.__name__)
        if high is not None:
            try:
                _ = high - 0
            except TypeError:
                raise Exception("The 'high' argument to %s must be of a "
                                "numeric type or None"
                                % self.__class__.__name__)
        self.low, self.high = low, high
def __radd__(self, gg):
gg = deepcopy(gg)
gg.ylimits = [self.low, self.high]
return gg
class xlab(object):
"""
Set label for x axis
Parameters
----------
label:
label for your axis
Examples
--------
    >>> ggplot(mpg, aes(x='hwy')) + geom_histogram() + xlab("Miles / gallon")
"""
def __init__(self, xlab):
if xlab is None:
            raise Exception("Arguments to %s cannot be None"
                            % self.__class__.__name__)
self.xlab = xlab
def __radd__(self, gg):
gg = deepcopy(gg)
gg.xlab = self.xlab
return gg
class ylab(object):
"""
Set label for y axis
Parameters
----------
label:
label for your axis
Examples
--------
    >>> ggplot(mpg, aes(x='hwy')) + geom_histogram() + ylab("Count\n(# of cars)")
"""
def __init__(self, ylab):
if ylab is None:
            raise Exception("Arguments to %s cannot be None"
                            % self.__class__.__name__)
self.ylab = ylab
def __radd__(self, gg):
gg = deepcopy(gg)
gg.ylab = self.ylab
return gg
class labs(object):
"""
    Set labels for your plot
Parameters
----------
x:
label for your x axis
y:
label for your y axis
title:
title for your plot
Examples
--------
    >>> ggplot(mpg, aes(x='hwy')) + geom_histogram() + labs("Miles / gallon", "Count\n(# of cars)", "MPG Plot")
"""
def __init__(self, x=None, y=None, title=None):
self.x = x
self.y = y
self.title = title
def __radd__(self, gg):
gg = deepcopy(gg)
if self.x:
gg.xlab = self.x
if self.y:
gg.ylab = self.y
if self.title:
gg.title = self.title
return gg
if __name__ == '__main__':
xlab("HI")
ylab("hi")
labs(x="hi", y="boo", title="foo")
ggtitle("hi")
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/chart_components.py",
"copies": "1",
"size": "4623",
"license": "bsd-2-clause",
"hash": -4620767025204918000,
"line_mean": 22.8298969072,
"line_max": 111,
"alpha_frac": 0.4715552671,
"autogenerated": false,
"ratio": 3.836514522821577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4808069789921577,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
from ggplot.utils.exceptions import GgplotError
class ggtitle(object):
def __init__(self, title):
if title is None:
            raise GgplotError("Arguments to %s cannot be None"
                              % self.__class__.__name__)
self.title = title
def __radd__(self, gg):
gg = deepcopy(gg)
gg.title = self.title
return gg
class xlim(object):
    def __init__(self, low=None, high=None):
        if low is not None:
            try:
                _ = low - 0
            except TypeError:
                raise GgplotError("The 'low' argument to %s must be of a "
                                  "numeric type or None"
                                  % self.__class__.__name__)
        if high is not None:
            try:
                _ = high - 0
            except TypeError:
                raise GgplotError("The 'high' argument to %s must be of a "
                                  "numeric type or None"
                                  % self.__class__.__name__)
        self.low, self.high = low, high
def __radd__(self, gg):
gg = deepcopy(gg)
gg.xlimits = [self.low, self.high]
return gg
class ylim(object):
    def __init__(self, low=None, high=None):
        if low is not None:
            try:
                _ = low - 0
            except TypeError:
                raise GgplotError("The 'low' argument to %s must be of a "
                                  "numeric type or None"
                                  % self.__class__.__name__)
        if high is not None:
            try:
                _ = high - 0
            except TypeError:
                raise GgplotError("The 'high' argument to %s must be of a "
                                  "numeric type or None"
                                  % self.__class__.__name__)
        self.low, self.high = low, high
def __radd__(self, gg):
gg = deepcopy(gg)
gg.ylimits = [self.low, self.high]
return gg
class xlab(object):
def __init__(self, xlab):
if xlab is None:
            raise GgplotError("Arguments to %s cannot be None"
                              % self.__class__.__name__)
self.xlab = xlab
def __radd__(self, gg):
gg = deepcopy(gg)
gg.xlab = self.xlab
return gg
class ylab(object):
def __init__(self, ylab):
if ylab is None:
            raise GgplotError("Arguments to %s cannot be None"
                              % self.__class__.__name__)
self.ylab = ylab
def __radd__(self, gg):
gg = deepcopy(gg)
gg.ylab = self.ylab
return gg
class labs(object):
def __init__(self, x=None, y=None, title=None):
self.x = x
self.y = y
self.title = title
def __radd__(self, gg):
gg = deepcopy(gg)
if self.x:
gg.xlab = self.x
if self.y:
gg.ylab = self.y
if self.title:
gg.title = self.title
return gg
| {
"repo_name": "assad2012/ggplot",
"path": "ggplot/geoms/chart_components.py",
"copies": "12",
"size": "2997",
"license": "bsd-2-clause",
"hash": 5701658987707839000,
"line_mean": 27.2735849057,
"line_max": 84,
"alpha_frac": 0.4734734735,
"autogenerated": false,
"ratio": 3.943421052631579,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005494541604109303,
"num_lines": 106
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
import math
from ..utils.ggutils import add_ggplotrc_params
from ggplot.utils.exceptions import GgplotError
class facet_wrap(object):
def __init__(self, x=None, y=None, ncol=None, nrow=None, scales="free"):
if x is None and y is None:
raise GgplotError("You need to specify a variable name: facet_wrap('var')")
add_ggplotrc_params(self)
self.x = x
self.y = y
self.ncol = ncol
self.nrow = nrow
self.scales = scales
def __radd__(self, gg):
# deepcopy must be the first thing to not change the original object
gg = deepcopy(gg)
x, y = None, None
gg.n_dim_x = 1
facets = []
if self.x:
x = gg.data.get(self.x)
gg.n_dim_x = x.nunique()
facets.append(self.x)
if self.y:
y = gg.data.get(self.y)
gg.n_dim_x *= y.nunique()
facets.append(self.y)
n_rows = self.nrow
n_cols = self.ncol
if n_rows is None and n_cols is None:
# calculate both on the fly
n_rows = math.ceil(math.sqrt(gg.n_dim_x))
n_cols = math.ceil(gg.n_dim_x / math.ceil(math.sqrt(gg.n_dim_x)))
elif n_rows is None:
# calculate n_rows on the fly
n_rows = math.ceil(float(gg.n_dim_x) / n_cols)
elif n_cols is None:
# calculate n_columns on the fly
n_cols = math.ceil(float(gg.n_dim_x) / n_rows)
gg.n_rows, gg.n_columns = int(n_rows), int(n_cols)
gg.facets = facets
gg.facet_type = "wrap"
gg.facet_scales = self.scales
return gg
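# Worked example of the grid computation above (illustrative): with 7
# facet levels and neither nrow nor ncol supplied,
#     n_rows = ceil(sqrt(7)) = 3,  n_cols = ceil(7 / 3) = 3,
# so the panels are laid out on a 3x3 grid with two cells left empty.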
| {
"repo_name": "udacity/ggplot",
"path": "ggplot/geoms/facet_wrap.py",
"copies": "12",
"size": "1806",
"license": "bsd-2-clause",
"hash": 6704561223507445000,
"line_mean": 31.8363636364,
"line_max": 87,
"alpha_frac": 0.5437430786,
"autogenerated": false,
"ratio": 3.213523131672598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9757266210272598,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
import math
from ..utils.utils import add_ggplotrc_params
from .facet_wrap import facet_wrap
class facet_grid(object):
def __init__(self, x=None, y=None, scales=None):
add_ggplotrc_params(self)
self.x = x
self.y = y
self.ncol = None
self.nrow = None
self.scales = scales
def __radd__(self, gg):
x = gg.data.get(self.x)
y = gg.data.get(self.y)
if x is None and y is None:
raise Exception("No facets provided!")
# only do the deepcopy after the check
gg = deepcopy(gg)
if x is None:
n_dim_x = 1
else:
n_dim_x = x.nunique()
if y is None:
n_dim_y = 1
else:
n_dim_y = y.nunique()
n_dim = n_dim_x * n_dim_y
if self.ncol is None and self.nrow is None:
n_wide = n_dim_x
n_high = n_dim_y
elif self.nrow is None:
n_wide = self.ncol
n_high = math.ceil(float(n_dim) / n_wide)
elif self.ncol is None:
n_high = self.nrow
n_wide = math.ceil(float(n_dim) / n_high)
else:
n_wide = self.ncol
n_high = self.nrow
gg.n_wide, gg.n_high = int(n_wide), int(n_high)
facets = []
if self.x:
facets.append(self.x)
if self.y:
facets.append(self.y)
gg.facets = facets
gg.facet_type = "grid"
gg.facet_scales = self.scales
        combos = []
        # x may be None when faceting only on y; fall back to a single level
        x_values = sorted(x.unique()) if x is not None else [1]
        for x_i in x_values:
            if y is not None:
                for y_i in sorted(y.unique()):
                    combos.append((x_i, y_i))
            else:
                combos.append((x_i, 1))
gg.facet_pairs = combos
return gg
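# Example of the facet_pairs computed above (illustrative): faceting on an
# x column with levels ['a', 'b'] and a y column with levels [1, 2] yields
#     [('a', 1), ('a', 2), ('b', 1), ('b', 2)]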
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/facet_grid.py",
"copies": "1",
"size": "1947",
"license": "bsd-2-clause",
"hash": 7490102003766881000,
"line_mean": 26.4225352113,
"line_max": 66,
"alpha_frac": 0.489470981,
"autogenerated": false,
"ratio": 3.3225255972696246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43119965782696246,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
import math
from ..utils.utils import add_ggplotrc_params
class facet_wrap(object):
def __init__(self, x=None, y=None, ncol=None, nrow=None, scales="free"):
if x is None and y is None:
raise Exception("You need to specify a variable name: facet_wrap('var')")
add_ggplotrc_params(self)
self.x = x
self.y = y
self.ncol = ncol
self.nrow = nrow
self.scales = scales
def __radd__(self, gg):
# deepcopy must be the first thing to not change the original object
gg = deepcopy(gg)
x, y = None, None
gg.n_dim_x = 1
facets = []
if self.x:
x = gg.data.get(self.x)
gg.n_dim_x = x.nunique()
facets.append(self.x)
if self.y:
y = gg.data.get(self.y)
gg.n_dim_x *= y.nunique()
facets.append(self.y)
# TODO: for some reason this is backwards
n_wide = self.nrow
n_high = self.ncol
if n_wide is None and n_high is None:
# calculate both on the fly
n_wide = math.ceil(math.sqrt(gg.n_dim_x))
n_high = math.ceil(gg.n_dim_x / math.ceil(math.sqrt(gg.n_dim_x)))
elif n_wide is None:
# calculate n_wide on the fly
n_wide = math.ceil(float(gg.n_dim_x) / n_high)
elif n_high is None:
# calculate n_high on the fly
n_high = math.ceil(float(gg.n_dim_x) / n_wide)
gg.n_wide, gg.n_high = int(n_wide), int(n_high)
gg.facets = facets
gg.facet_type = "wrap"
gg.facet_scales = self.scales
return gg
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/facet_wrap.py",
"copies": "1",
"size": "1798",
"license": "bsd-2-clause",
"hash": 386275062830202300,
"line_mean": 31.6909090909,
"line_max": 85,
"alpha_frac": 0.5350389321,
"autogenerated": false,
"ratio": 3.2454873646209386,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42805262967209384,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
import warnings
THEME_PARAMETERS = {
"axis_line": "?",
"axis_text": "?",
"axis_text_x": "?",
"axis_text_y": "?",
"axis_title": "?",
"axis_title_x": "?",
"axis_title_y": "?",
"axis_ticks": "?",
"axis_ticks_length": "?",
"axis_ticks_margin": "?",
"legend_background": "?",
"legend_key": "?",
"legend_key_size": "?",
"legend_key_height": "?",
"legend_key_width": "?",
"legend_margin": "?",
"legend_text": "?",
"legend_text_align": "?",
"legend_title": "?",
"legend_title_align": "?",
"legend_position": "?",
"legend_direction": "?",
"legend_justification": "?",
"legend_box": "?",
"plot_background": "?",
"plot_title": "?",
"plot_margin": "?",
"strip_background": "?",
"strip_text_x": "?",
"strip_text_y": "?",
"panel_background": "?",
"panel_border": "?",
"panel_grid_major_x": "?",
"panel_grid_minor_x": "?",
"panel_grid_major_y": "?",
"panel_grid_minor_y": "?",
"panel_margin": "?"
}
class theme_base(object):
_rcParams = {}
def __init__(self):
pass
def __radd__(self, other):
        if other.__class__.__name__ == "ggplot":
other.theme = self
return other
return self
def get_rcParams(self):
return self._rcParams
def apply_final_touches(self, ax):
pass
class theme(theme_base):
"""
Custom theme for your plot.
Parameters
----------
title:
title of your plot
plot_title:
title of your plot (same as title)
plot_margin:
size of plot margins
axis_title:
title of your plot (same as title)
axis_title_x:
x axis title
axis_title_y:
y axis title
axis_text:
theme for text
axis_text_x:
theme for x axis text
axis_text_y:
theme for y axis text
Examples
--------
>>> ggplot(mtcars, aes(x='mpg')) + geom_histogram() + theme()
>>> ggplot(mtcars, aes(x='mpg')) + geom_histogram() + theme(plot_margin=dict(bottom=0.2, left=0.2))
>>> ggplot(mtcars, aes(x='mpg')) + geom_histogram() + theme(axis_text=element_text(size=20))
>>> ggplot(mtcars, aes(x='mpg')) + geom_histogram() + theme(x_axis_text=element_text(color="orange"), y_axis_text=element_text(color="blue"))
>>> ggplot(mtcars, aes(x='mpg')) + geom_histogram() + theme(axis_text=element_text(size=20), x_axis_text=element_text(color="orange"), y_axis_text=element_text(color="blue"))
"""
    # this maps theme element names to attributes of a ggplot object; there
    # is more than one way to say the same thing
ATTRIBUTE_MAPPING = dict(
# title
title="title",
plot_title="title",
axis_title="title",
# margins
plot_margin="margins",
# text for x and y axis labels
axis_title_x="xlab",
axis_title_y="ylab",
axis_text="axis_text",
# text for x-axis
x_axis_text="x_axis_text",
axis_text_x="x_axis_text",
# text for y-axis
y_axis_text="y_axis_text",
axis_text_y="y_axis_text",
)
def __init__(self, *args, **kwargs):
self.things = deepcopy(kwargs)
def __radd__(self, other):
        if other.__class__.__name__ == "ggplot":
            other.theme = self
            for key, value in self.things.items():
                try:
                    ggplot_attr_name = self.ATTRIBUTE_MAPPING[key]
                except KeyError:
                    msg = "%s is an invalid theme parameter" % key
                    warnings.warn(msg, RuntimeWarning)
                    continue
setattr(other, ggplot_attr_name, value)
return other
return self
def parameter_lookup(self, parameter):
return THEME_PARAMETERS.get(parameter)
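# Usage sketch (illustrative; mirrors the docstring examples above): the
# keyword arguments are translated through ATTRIBUTE_MAPPING onto plain
# ggplot attributes, so these two calls are equivalent ways to title a plot:
#     theme(title="MPG")        # sets gg.title
#     theme(plot_title="MPG")   # also sets gg.title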
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/themes/theme.py",
"copies": "1",
"size": "3984",
"license": "bsd-2-clause",
"hash": 9009798452405012000,
"line_mean": 27.0563380282,
"line_max": 178,
"alpha_frac": 0.5323795181,
"autogenerated": false,
"ratio": 3.449350649350649,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44817301674506493,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
class ggtitle(object):
def __init__(self, title):
self.title = title
def __radd__(self, gg):
gg = deepcopy(gg)
gg.title = self.title
return gg
class xlab(object):
def __init__(self, xlab):
self.xlab = xlab
def __radd__(self, gg):
gg = deepcopy(gg)
gg.xlab = self.xlab
return gg
class xlim(object):
def __init__(self, low, high):
self.low, self.high = low, high
def __radd__(self, gg):
gg = deepcopy(gg)
gg.xlimits = [self.low, self.high]
return gg
class ylim(object):
def __init__(self, low, high):
self.low, self.high = low, high
def __radd__(self, gg):
gg = deepcopy(gg)
gg.ylimits = [self.low, self.high]
return gg
class ylab(object):
def __init__(self, ylab):
self.ylab = ylab
def __radd__(self, gg):
gg = deepcopy(gg)
gg.ylab = self.ylab
return gg
class labs(object):
def __init__(self, x=None, y=None, title=None):
self.x = x
self.y = y
self.title = title
def __radd__(self, gg):
gg = deepcopy(gg)
if self.x:
gg.xlab = self.x
if self.y:
gg.ylab = self.y
if self.title:
gg.title = self.title
return gg
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/chart_components.py",
"copies": "1",
"size": "1462",
"license": "bsd-2-clause",
"hash": 7793525908812903000,
"line_mean": 21.84375,
"line_max": 66,
"alpha_frac": 0.5157318741,
"autogenerated": false,
"ratio": 3.234513274336283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4250245148436283,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
class scale_identity(object):
"""
Use the value that you've passed for an aesthetic in the plot without mapping
it to something else. Classic example is if you have a data frame with a column
that's like this:
mycolor
0 blue
1 red
2 green
3 blue
4 red
5 blue
And you want the actual points you plot to show up as blue, red, or green. Under
normal circumstances, ggplot will generate a palette for these colors because it
thinks they are just normal categorical variables. Using scale_identity will make
it so ggplot uses the values of the field as the aesthetic mapping, so the points
will show up as the colors you want.
"""
VALID_SCALES = ["identity_type"]
def __radd__(self, gg):
gg = deepcopy(gg)
gg.scale_identity.add(self.identity_type)
return gg
class scale_alpha_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "alpha"
class scale_color_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "color"
class scale_fill_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "fill"
class scale_linetype_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "linetype"
class scale_shape_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "shape"
class scale_size_identity(scale_identity):
"""
See scale_identity
"""
identity_type = "size"
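# Usage sketch (hypothetical data frame 'df' with a 'mycolor' column of
# literal color names, as described in the scale_identity docstring):
#     ggplot(aes(x='x', y='y', color='mycolor'), data=df) + \
#         geom_point() + scale_color_identity()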
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/scale_identity.py",
"copies": "1",
"size": "1712",
"license": "bsd-2-clause",
"hash": 5730894995999189000,
"line_mean": 26.1746031746,
"line_max": 85,
"alpha_frac": 0.6331775701,
"autogenerated": false,
"ratio": 3.97215777262181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.510533534272181,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import deepcopy
import pandas as pd
import numpy as np
from matplotlib.cbook import iterable
from ggplot.utils import is_string
import ggplot.stats
from ggplot.utils import is_scalar_or_string
from ggplot.components import aes
from ggplot.utils.exceptions import GgplotError
__all__ = ['geom']
__all__ = [str(u) for u in __all__]
class geom(object):
"""Base class of all Geoms"""
DEFAULT_AES = dict()
REQUIRED_AES = set()
DEFAULT_PARAMS = dict()
data = None
aes = None
manual_aes = None
params = None
# Some geoms require more information than that provided by the
# user. This information is usually another aesthetic variable
# but it could be another non-aesthetic variable. It is the duty
# of the associated statistic to calculate this information.
#
# For example:
# A geom may have REQUIRED_AES = {'x', 'y'} and
# the user may map or manually set only aesthetic 'x',
# so the stat would have to calculate 'y'. However, this
# may not be enough; to actually make the plot, the geom
# may require the 'width' aesthetic. In this case, 'width'
# would be the extra required information.
#
# geoms should fill out this set with what they require
# and is not in REQUIRED_AES
# see: geom_bar, stat_bin
_extra_requires = set()
# Some ggplot aesthetics are named different from the parameters of
# the matplotlib function that will be used to plot.
# This dictionary, of the form {ggplot-aes-name: matplotlib-aes-name},
# connects the two.
#
# geoms should fill it out so that the plot
# information they receive is properly named.
# See: geom_point
_aes_renames = dict()
# A matplotlib plot function may require that an aesthetic have a
# single unique value. e.g. linestyle='dashed' and not
# linestyle=['dashed', 'dotted', ...].
# A single call to such a function can only plot lines with the
# same linestyle. However, if the plot we want has more than one
# line with different linestyles, we need to group the lines with
# the same linestyle and plot them as one unit.
#
# geoms should fill out this set with such aesthetics so that the
# plot information they receive can be plotted in a single call.
# Use names as expected by matplotlib
# See: geom_point
_units = set()
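# For illustration only -- a hypothetical minimal subclass showing how a
# concrete geom might fill out the attributes described above (the names
# below are assumptions, not part of this library):
#
#   class geom_example(geom):
#       DEFAULT_AES = {'color': 'black', 'linetype': 'solid'}
#       REQUIRED_AES = {'x', 'y'}
#       DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}
#       _extra_requires = {'width'}
#       _aes_renames = {'linetype': 'linestyle'}
#       _units = {'linestyle'}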
def __init__(self, *args, **kwargs):
self.valid_aes = set(self.DEFAULT_AES) | self.REQUIRED_AES
self._stat_type = self._get_stat_type(kwargs)
self.aes, self.data, kwargs = self._find_aes_and_data(args, kwargs)
# This set will list the aesthetics that were uniquely set in this
# geom (i.e. not already specified in the ggplot aes).
self.aes_unique_to_geom = set(self.aes.keys())
if 'colour' in kwargs:
kwargs['color'] = kwargs.pop('colour')
# When a geom is created, some of the parameters may be meant
# for the stat and some for the layer.
# Some arguments can be identified as either aesthetics to
# the geom or parameter settings to the stat; in this case,
# if the argument has a scalar value it is a setting for the stat.
self._stat_params = {}
self.params = deepcopy(self.DEFAULT_PARAMS)
self.manual_aes = {}
for k, v in kwargs.items():
if k in self.aes:
raise GgplotError('Aesthetic, %s, specified twice' % k)
elif (k in self.valid_aes and
k in self._stat_type.DEFAULT_PARAMS and
is_scalar_or_string(kwargs[k])):
self._stat_params[k] = v
elif k in self.valid_aes:
self.manual_aes[k] = v
elif k in self.DEFAULT_PARAMS:
self.params[k] = v
elif k in self._stat_type.DEFAULT_PARAMS:
self._stat_params[k] = v
else:
raise GgplotError('Cannot recognize argument: %s' % k)
self._cache = {}
# When putting together the plot information for the geoms,
# we need the aesthetic names to be matplotlib compatible.
# These are created and stored in self._cache and so would
# go stale if users or geoms change geom.manual_aes
self._create_aes_with_mpl_names()
def plot_layer(self, data, ax):
# Any aesthetic to be overridden by the manual aesthetics
# should not affect the statistics and the unit grouping
# of the data
_cols = set(data.columns) & set(self.manual_aes)
data = data.drop(_cols, axis=1)
data = self._calculate_stats(data)
self._verify_aesthetics(data)
_needed = self.valid_aes | self._extra_requires
data = data[list(set(data.columns) & _needed)]
# aesthetic precedence
# geom.manual_aes > geom.aes > ggplot.aes (part of data)
# NOTE: currently geom.aes is not handled. This may be
# a bad place to do it -- may mess up faceting or just
# inefficient. Probably in ggplot or layer.
data = data.rename(columns=self._aes_renames)
units = self._units & set(data.columns)
# Create plot information that observes the aesthetic precedence
# - (grouped data + manual aesthetics)
# - modify previous using statistic
# - previous overwrites the default aesthetics
for _data in self._get_unit_grouped_data(data, units):
_data.update(self._cache['manual_aes_mpl']) # should happen before the grouping
pinfo = deepcopy(self._cache['default_aes_mpl'])
pinfo.update(_data)
self._plot_unit(pinfo, ax)
def _plot_unit(self, pinfo, ax):
msg = "{} should implement this method."
raise NotImplementedError(
msg.format(self.__class__.__name__))
def _get_stat_type(self, kwargs):
"""
Find out the stat and return the type object that can be
used (called) to create it.
For example, if the stat is 'smooth' we return
ggplot.stats.stat_smooth
"""
# get
try:
_name = 'stat_%s' % kwargs['stat']
except KeyError:
_name = 'stat_%s' % self.DEFAULT_PARAMS['stat']
return getattr(ggplot.stats, _name)
def __radd__(self, gg):
gg = deepcopy(gg)
# steal aesthetics info.
self._cache['ggplot.aesthetics'] = deepcopy(gg.aesthetics)
self.aes_unique_to_geom -= set(gg.aesthetics.keys())
# create stat and hand over the parameters it understands
if not hasattr(self, '_stat'):
self._stat = self._stat_type()
self._stat.params.update(self._stat_params)
gg.geoms.append(self)
self.gg = gg
return gg
def _verify_aesthetics(self, data):
"""
Check if all the required aesthetics have been specified.
Raise an Exception if an aesthetic is missing
"""
missing_aes = (self.REQUIRED_AES -
set(self.manual_aes) -
set(data.columns))
if missing_aes:
msg = '{} requires the following missing aesthetics: {}'
raise GgplotError(msg.format(
self.__class__.__name__, ', '.join(missing_aes)))
def _find_aes_and_data(self, args, kwargs):
"""
Identify the aes and data objects.
Return a dictionary of the aes mappings and
the data object.
- args is a list
- kwargs is a dictionary
Note: This is a helper function for self.__init__
It modifies the kwargs
"""
passed_aes = {}
data = None
aes_err = 'Found more than one aes argument. Expecting zero or one'
for arg in args:
if isinstance(arg, aes) and passed_aes:
raise GgplotError(aes_err)
if isinstance(arg, aes):
passed_aes = arg
elif isinstance(arg, pd.DataFrame):
data = arg
else:
raise GgplotError(
'Unknown argument of type "{0}".'.format(type(arg)))
if 'mapping' in kwargs and passed_aes:
raise GgplotError(aes_err)
elif not passed_aes and 'mapping' in kwargs:
passed_aes = kwargs.pop('mapping')
if data is None and 'data' in kwargs:
data = kwargs.pop('data')
_aes = {}
# To make mapping of columns to geom/stat or stat parameters
# possible
_keep = set(self.DEFAULT_PARAMS) | set(self._stat_type.DEFAULT_PARAMS)
for k, v in passed_aes.items():
if k in self.valid_aes or k in _keep:
_aes[k] = v
else:
raise GgplotError('Cannot recognize aesthetic: %s' % k)
return _aes, data, kwargs
def _calculate_and_rename_stats(self, data):
"""
Use the stat object (self._stat) to compute the stats
and make sure the returned columns are renamed to
matplotlib compatible names
"""
# only rename the new columns,
# so keep track of the original ones
_original = set(data)
data = self._stat._calculate(data)
_d = {}
for old, new in self._aes_renames.items():
if (old in data) and (old not in _original):
_d[new] = data.pop(old)
data.update(_d)
return data
def _calculate_stats(self, data):
"""
Calculate the statistics on each group in the data
The groups are determined by the mappings.
Returns
-------
data : dataframe
"""
self._stat._verify_aesthetics(data)
self._stat._calculate_global(data)
# In most cases 'x' and 'y' mappings do not and
# should not influence the grouping. If this is
# not the desired behaviour then the groups
# parameter should be used.
groups = set(self._cache['ggplot.aesthetics'].keys())
groups = groups & (self.valid_aes - {'x', 'y'})
groups = groups & set(data.columns)
new_data = pd.DataFrame()
# TODO: Find a more efficient way to concatenate
# the dataframes
if groups:
for name, _data in data.groupby(sorted(groups)):
_data = _data.reindex()
_data = self._stat._calculate(_data)
new_data = new_data.append(_data, ignore_index=True)
else:
new_data = self._stat._calculate(data)
return new_data
def _create_aes_with_mpl_names(self):
"""
Create copies of the manual and default aesthetics
with matplotlib compatible names.
Uses self._aes_renames, and the results are stored
in:
self._cache['manual_aes_mpl']
self._cache['default_aes_mpl']
"""
def _rename_fn(aes_dict):
# to prevent overwrites
_d = {}
for old, new in self._aes_renames.items():
if old in aes_dict:
_d[new] = aes_dict.pop(old)
aes_dict.update(_d)
self._cache['manual_aes_mpl'] = deepcopy(self.manual_aes)
self._cache['default_aes_mpl'] = deepcopy(self.DEFAULT_AES)
_rename_fn(self._cache['manual_aes_mpl'])
_rename_fn(self._cache['default_aes_mpl'])
def _get_unit_grouped_data(self, data, units):
"""
Split data into groups.
The units determine the groups.
Parameters
----------
data : dataframe
The data to be split into groups
units : set
A set of column names in the data and by
which the grouping will happen
Returns
-------
out : list of dict
Each dict represents a unique grouping.
The dicts are of the form
{'column-name': list-of-values | value}
Note
----
This is a helper function for self._plot_layer
"""
out = []
if units:
for name, _data in data.groupby(list(units)):
_data = _data.to_dict('list')
for ae in units:
_data[ae] = _data[ae][0]
out.append(_data)
else:
_data = data.to_dict('list')
out.append(_data)
return out
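# Worked example (invented data) of the unit grouping described above:
#
#   data = pd.DataFrame({'x': [1, 2, 3], 'linestyle': ['-', '-', ':']})
#   units = {'linestyle'}
#   # -> [{'x': [1, 2], 'linestyle': '-'}, {'x': [3], 'linestyle': ':'}]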
def sort_by_x(self, pinfo):
"""
Sort the lists in pinfo according to pinfo['x']
This function is useful for geom's that expect
the x-values to come in sorted order
"""
# Remove list types from pinfo
_d = {}
for k in list(pinfo.keys()):
if not is_string(pinfo[k]) and iterable(pinfo[k]):
_d[k] = pinfo.pop(k)
# Sort numerically if all items can be cast
try:
x = list(map(np.float, _d['x']))
except (ValueError, TypeError):
x = _d['x']
# Make sure we don't try to sort something unsortable
try:
idx = np.argsort(x)
# Put sorted lists back in pinfo
for key in _d:
pinfo[key] = [_d[key][i] for i in idx]
except Exception:
pass
return pinfo
| {
"repo_name": "xguse/ggplot",
"path": "ggplot/geoms/geom.py",
"copies": "12",
"size": "13443",
"license": "bsd-2-clause",
"hash": -1784524022633546200,
"line_mean": 34.7526595745,
"line_max": 91,
"alpha_frac": 0.572788812,
"autogenerated": false,
"ratio": 4.0008928571428575,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from cycler import cycler
from .theme import theme_base
class theme_538(theme_base):
"""
Theme for 538.
Copied from CamDavidsonPilon's gist:
https://gist.github.com/CamDavidsonPilon/5238b6499b14604367ac
"""
def __init__(self):
super(theme_538, self).__init__()
self._rcParams["lines.linewidth"] = "2.0"
# self._rcParams["examples.download"] = "True"
self._rcParams["patch.linewidth"] = "0.5"
self._rcParams["legend.fancybox"] = "True"
self._rcParams["axes.prop_cycle"] = cycler('color', [ "#30a2da", "#fc4f30", "#e5ae38",
"#6d904f", "#8b8b8b"])
self._rcParams["axes.facecolor"] = "#f0f0f0"
self._rcParams["axes.labelsize"] = "large"
self._rcParams["axes.axisbelow"] = "True"
self._rcParams["axes.grid"] = "True"
self._rcParams["patch.edgecolor"] = "#f0f0f0"
self._rcParams["axes.titlesize"] = "x-large"
# self._rcParams["svg.embed_char_paths"] = "path"
self._rcParams["examples.directory"] = ""
self._rcParams["figure.facecolor"] = "#f0f0f0"
self._rcParams["grid.linestyle"] = "-"
self._rcParams["grid.linewidth"] = "1.0"
self._rcParams["grid.color"] = "#cbcbcb"
self._rcParams["axes.edgecolor"] = "#f0f0f0"
self._rcParams["xtick.major.size"] = "0"
self._rcParams["xtick.minor.size"] = "0"
self._rcParams["ytick.major.size"] = "0"
self._rcParams["ytick.minor.size"] = "0"
self._rcParams["axes.linewidth"] = "3.0"
self._rcParams["font.size"] ="14.0"
self._rcParams["lines.linewidth"] = "4"
self._rcParams["lines.solid_capstyle"] = "butt"
self._rcParams["savefig.edgecolor"] = "#f0f0f0"
self._rcParams["savefig.facecolor"] = "#f0f0f0"
self._rcParams["figure.subplot.left"] = "0.08"
self._rcParams["figure.subplot.right"] = "0.95"
self._rcParams["figure.subplot.bottom"] = "0.07"
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/themes/theme_538.py",
"copies": "1",
"size": "2149",
"license": "bsd-2-clause",
"hash": -8280359643509341000,
"line_mean": 42.8571428571,
"line_max": 94,
"alpha_frac": 0.5751512331,
"autogenerated": false,
"ratio": 3.1372262773722626,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9199353418991562,
"avg_score": 0.0026048182961401966,
"num_lines": 49
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import datetime
from .crypto import Crypter
from .serializer import URLSafeCookieSerializer
from .session import Session
from .util import parse_settings
def session_factory_factory(secret,
backend=None,
clientside=None,
cookie_name_temporary='gimlet-n',
cookie_name_permanent='gimlet-p',
encryption_key=None,
permanent=False):
"""Configure a :class:`.session.Session` subclass."""
if backend is None:
if clientside is False:
raise ValueError('cannot configure default of clientside=False '
'with no backend present')
clientside = True
else:
clientside = bool(clientside)
if encryption_key:
crypter = Crypter(encryption_key)
else:
crypter = None
future = datetime.fromtimestamp(0x7FFFFFFF)
configuration = {
'backend': backend,
'channel_names': {
},
'channel_opts': {
},
'defaults': {
'permanent': permanent,
'clientside': clientside,
},
'serializer': URLSafeCookieSerializer(secret, backend, crypter),
}
configuration['channel_names']['perm'] = cookie_name_permanent
configuration['channel_names']['nonperm'] = cookie_name_temporary
configuration['channel_opts']['perm'] = {'expires': future}
configuration['channel_opts']['nonperm'] = {}
return type(str('SessionFactory'), (Session,), configuration)
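# A minimal usage sketch (the secret value is an assumption; clientside
# defaults to True when no backend is given):
#
#   SessionFactory = session_factory_factory(secret='s3krit')
#   # SessionFactory is a Session subclass preconfigured with the options above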
def session_factory_from_settings(settings, prefix='gimlet.'):
"""Configure a :class:`.session.Session` from ``settings``.
See :func:`.util.parse_settings` for more info on how the
``settings`` is parsed.
"""
options = parse_settings(settings, prefix)
return session_factory_factory(**options)
| {
"repo_name": "storborg/gimlet",
"path": "gimlet/factories.py",
"copies": "1",
"size": "2049",
"license": "mit",
"hash": -8501411064591871000,
"line_mean": 29.1323529412,
"line_max": 76,
"alpha_frac": 0.5915080527,
"autogenerated": false,
"ratio": 4.563474387527839,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.565498244022784,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .date_utils import date_breaks, date_format
from .scale import scale
from copy import deepcopy
import six
class scale_x_date(scale):
"""
Position scale, date
Parameters
----------
labels : string / date_format
format for your dates
breaks : string / list of breaks
1) a string specifying the width between breaks.
2) the result of a valid call to `date_breaks`
3) a vector of breaks (TODO: not implemented yet!)
Examples
--------
>>> # 1) manually pass in breaks=date_breaks()
>>> print(ggplot(meat, aes('date','beef')) + \\
... geom_line() + \\
... scale_x_date(breaks=date_breaks('10 years'),
... labels=date_format('%B %-d, %Y')))
>>> # 2) or breaks as just a string
>>> print(ggplot(meat, aes('date','beef')) + \\
... geom_line() + \\
... scale_x_date(breaks='10 years',
... labels=date_format('%B %-d, %Y')))
"""
VALID_SCALES = ['name', 'labels', 'limits', 'breaks', 'trans']
def __radd__(self, gg):
gg = deepcopy(gg)
if self.name:
gg.xlab = self.name.title()
if self.labels is not None:
if isinstance(self.labels, six.string_types):
self.labels = date_format(self.labels)
gg.xtick_formatter = self.labels
if self.limits is not None:
gg.xlimits = self.limits
if self.breaks is not None:
if isinstance(self.breaks, six.string_types):
self.breaks = date_breaks(self.breaks)
gg.xmajor_locator = self.breaks
return gg
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/scale_x_date.py",
"copies": "1",
"size": "1756",
"license": "bsd-2-clause",
"hash": 6191775886401853000,
"line_mean": 34.12,
"line_max": 66,
"alpha_frac": 0.5495444191,
"autogenerated": false,
"ratio": 3.752136752136752,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4801681171236752,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..extern.six.moves import zip as izip
import platform
import warnings
import numpy as np
from ..utils.exceptions import AstropyUserWarning
__all__ = ['TableGroups', 'ColumnGroups']
def table_group_by(table, keys):
"""
Get groups for ``table`` on specified ``keys``.
Parameters
----------
table : `Table`
Table to group
keys : str, list of str, `Table`, or Numpy array
Grouping key specifier
Returns
-------
grouped_table : Table object with groups attr set accordingly
"""
from .table import Table
# Pre-convert string to tuple of strings, or Table to the underlying structured array
if isinstance(keys, six.string_types):
keys = (keys,)
if isinstance(keys, (list, tuple)):
for name in keys:
if name not in table.colnames:
raise ValueError('Table does not have key column {0!r}'.format(name))
if table.masked and np.any(table[name].mask):
raise ValueError('Missing values in key column {0!r} are not allowed'.format(name))
keys = tuple(keys)
table_keys = table[keys]
grouped_by_table_cols = True # Grouping keys are columns from the table being grouped
elif isinstance(keys, (np.ndarray, Table)):
table_keys = keys
if len(table_keys) != len(table):
raise ValueError('Input keys array length {0} does not match table length {1}'
.format(len(table_keys), len(table)))
grouped_by_table_cols = False # Grouping key(s) are external
else:
raise TypeError('Keys input must be string, list, tuple or numpy array, but got {0}'
.format(type(keys)))
try:
idx_sort = table_keys.argsort(kind='mergesort')
stable_sort = True
except TypeError:
# Some versions (likely 1.6 and earlier) of numpy don't support
# 'mergesort' for all data types. MacOSX (Darwin) doesn't have a stable
# sort by default, nor does Windows, while Linux does (or appears to).
idx_sort = table_keys.argsort()
stable_sort = platform.system() not in ('Darwin', 'Windows')
table_keys = table_keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# If the sort is not stable (preserves original table order) then sort idx_sort in
# place within each group.
if not stable_sort:
for i0, i1 in izip(indices[:-1], indices[1:]):
idx_sort[i0:i1].sort()
# Make a new table and set the _groups to the appropriate TableGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = table.__class__(table[idx_sort])
out_keys = table_keys[indices[:-1]]
if isinstance(out_keys, Table):
out_keys.meta['grouped_by_table_cols'] = grouped_by_table_cols
out._groups = TableGroups(out, indices=indices, keys=out_keys)
return out
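# A minimal sketch of the call pattern (invented table; assumes astropy.table
# is importable as in this module):
#
#   t = Table({'a': [2, 1, 2], 'b': [4.0, 5.0, 6.0]})
#   out = table_group_by(t, 'a')
#   out.groups.indices   # -> array([0, 1, 3]): rows sorted by 'a', two groups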
def column_group_by(column, keys):
"""
Get groups for ``column`` on specified ``keys``
Parameters
----------
column : Column object
Column to group
keys : Table or Numpy array of same length as col
Grouping key specifier
Returns
-------
grouped_column : Column object with groups attr set accordingly
"""
from .table import Table
if isinstance(keys, Table):
keys = keys.as_array()
if not isinstance(keys, np.ndarray):
raise TypeError('Keys input must be numpy array, but got {0}'
.format(type(keys)))
if len(keys) != len(column):
raise ValueError('Input keys array length {0} does not match column length {1}'
.format(len(keys), len(column)))
idx_sort = keys.argsort()
keys = keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# Make a new column and set the _groups to the appropriate ColumnGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = column.__class__(column[idx_sort])
out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]])
return out
class BaseGroups(object):
"""
A class to represent groups within a table of heterogeneous data.
- ``keys``: key values corresponding to each group
- ``indices``: index values in parent table or column corresponding to group boundaries
- ``aggregate()``: method to create new table by aggregating within groups
"""
@property
def parent(self):
return self.parent_column if isinstance(self, ColumnGroups) else self.parent_table
def __iter__(self):
self._iter_index = 0
return self
def next(self):
ii = self._iter_index
if ii < len(self.indices) - 1:
i0, i1 = self.indices[ii], self.indices[ii + 1]
self._iter_index += 1
return self.parent[i0:i1]
else:
raise StopIteration
__next__ = next
def __getitem__(self, item):
parent = self.parent
if isinstance(item, int):
i0, i1 = self.indices[item], self.indices[item + 1]
out = parent[i0:i1]
out.groups._keys = parent.groups.keys[item]
else:
indices0, indices1 = self.indices[:-1], self.indices[1:]
try:
i0s, i1s = indices0[item], indices1[item]
except Exception:
raise TypeError('Index item for groups attribute must be a slice, '
'numpy mask or int array')
mask = np.zeros(len(parent), dtype=np.bool)
# Is there a way to vectorize this in numpy?
for i0, i1 in izip(i0s, i1s):
mask[i0:i1] = True
out = parent[mask]
out.groups._keys = parent.groups.keys[item]
out.groups._indices = np.concatenate([[0], np.cumsum(i1s - i0s)])
return out
def __repr__(self):
return '<{0} indices={1}>'.format(self.__class__.__name__, self.indices)
def __len__(self):
return len(self.indices) - 1
class ColumnGroups(BaseGroups):
def __init__(self, parent_column, indices=None, keys=None):
self.parent_column = parent_column # parent Column
self.parent_table = parent_column.parent_table
self._indices = indices
self._keys = keys
@property
def indices(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.indices
else:
if self._indices is None:
return np.array([0, len(self.parent_column)])
else:
return self._indices
@property
def keys(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.keys
else:
return self._keys
def aggregate(self, func):
from .column import MaskedColumn, col_getattr
i0s, i1s = self.indices[:-1], self.indices[1:]
par_col = self.parent_column
masked = isinstance(par_col, MaskedColumn)
reduceat = hasattr(func, 'reduceat')
sum_case = func is np.sum
mean_case = func is np.mean
try:
if not masked and (reduceat or sum_case or mean_case):
if mean_case:
vals = np.add.reduceat(par_col, i0s) / np.diff(self.indices)
else:
if sum_case:
func = np.add
vals = func.reduceat(par_col, i0s)
else:
vals = np.array([func(par_col[i0: i1]) for i0, i1 in izip(i0s, i1s)])
except Exception:
raise TypeError("Cannot aggregate column '{0}' with type '{1}'"
.format(col_getattr(par_col, 'name'),
col_getattr(par_col, 'dtype')))
out = par_col.__class__(data=vals,
name=col_getattr(par_col, 'name'),
description=col_getattr(par_col, 'description'),
unit=col_getattr(par_col, 'unit'),
format=col_getattr(par_col, 'format'),
meta=col_getattr(par_col, 'meta'))
return out
def filter(self, func):
"""
Filter groups in the Column based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept one argument:
- ``column`` : `Column` object
It must then return either `True` or `False`. As an example, the following
will select all column groups with only positive values::
def all_positive(column):
if np.any(column < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Column
New column with the aggregated rows.
"""
mask = np.empty(len(self), dtype=np.bool)
for i, group_column in enumerate(self):
mask[i] = func(group_column)
return self[mask]
class TableGroups(BaseGroups):
def __init__(self, parent_table, indices=None, keys=None):
self.parent_table = parent_table # parent Table
self._indices = indices
self._keys = keys
@property
def key_colnames(self):
"""
Return the names of columns in the parent table that were used for grouping.
"""
# If the table was grouped by key columns *in* the table then treat those columns
# differently in aggregation. In this case keys will be a Table with
# keys.meta['grouped_by_table_cols'] == True. Keys might not be a Table so we
# need to handle this.
grouped_by_table_cols = getattr(self.keys, 'meta', {}).get('grouped_by_table_cols', False)
return self.keys.colnames if grouped_by_table_cols else ()
@property
def indices(self):
if self._indices is None:
return np.array([0, len(self.parent_table)])
else:
return self._indices
def aggregate(self, func):
"""
Aggregate each group in the Table into a single row by applying the reduction
function ``func`` to group values in each column.
Parameters
----------
func : function
Function that reduces an array of values to a single value
Returns
-------
out : Table
New table with the aggregated rows.
"""
from .column import col_getattr
i0s, i1s = self.indices[:-1], self.indices[1:]
out_cols = []
parent_table = self.parent_table
for col in six.itervalues(parent_table.columns):
# For key columns just pick off first in each group since they are identical
if col_getattr(col, 'name') in self.key_colnames:
new_col = col.take(i0s)
else:
try:
new_col = col.groups.aggregate(func)
except TypeError as err:
warnings.warn(six.text_type(err), AstropyUserWarning)
continue
out_cols.append(new_col)
return parent_table.__class__(out_cols, meta=parent_table.meta)
def filter(self, func):
"""
Filter groups in the Table based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept two arguments:
- ``table`` : `Table` object
- ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping
It must then return either `True` or `False`. As an example, the following
will select all table groups with only positive values in the non-key columns::
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Table
New table with the aggregated rows.
"""
mask = np.empty(len(self), dtype=np.bool)
key_colnames = self.key_colnames
for i, group_table in enumerate(self):
mask[i] = func(group_table, key_colnames)
return self[mask]
@property
def keys(self):
return self._keys
| {
"repo_name": "piotroxp/scibibscan",
"path": "scib/lib/python3.5/site-packages/astropy/table/groups.py",
"copies": "1",
"size": "13125",
"license": "mit",
"hash": 4695564784196390000,
"line_mean": 33.1796875,
"line_max": 99,
"alpha_frac": 0.5730285714,
"autogenerated": false,
"ratio": 4.2270531400966185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5300081711496618,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from functools import wraps
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose, assert_equal
from scipy.spatial import cKDTree
def sort_positions(actual, expected):
assert_equal(len(actual), len(expected))
tree = cKDTree(actual)
devs, argsort = tree.query([expected])
return devs, actual[argsort][0]
def assert_coordinates_close(actual, expected, atol):
_, sorted_actual = sort_positions(actual, expected)
assert_allclose(sorted_actual, expected, atol=atol)
def repeat_check_std(func):
@wraps(func)
def wrapper(test_obj, *args, **kwargs):
global result_table
repeats = test_obj.repeats
actual = []
expected = []
for i in range(repeats):
result = func(test_obj, *args, **kwargs)
if result is None:
continue
a, e = result
if not hasattr(a, '__iter__'):
a = (a,)
if not hasattr(e, '__iter__'):
e = (e,)
assert len(a) == len(e)
actual.append(a)
expected.append(e)
actual = np.array(actual, dtype=np.float).T
expected = np.array(expected, dtype=np.float).T
n_tests = actual.shape[0]
for name in ['names', 'rtol', 'atol', 'fails']:
try:
_var = getattr(test_obj, name)
except AttributeError:
_var = None
if hasattr(_var, '__iter__'):
_var = list(_var)
else:
_var = [_var] * n_tests
if len(_var) < n_tests:
if n_tests % len(_var) == 0:
if name == 'names':
new_var = []
for i in range(int(n_tests // len(_var))):
new_var.extend([n + '_' + str(i) for n in _var])
_var = new_var
else:
_var *= int(n_tests // len(_var)) # broadcast
else:
raise ValueError('{} has the wrong length'.format(name))
setattr(test_obj, name, _var)
_exceptions = []
_result_table = []
for i, (a, e) in enumerate(zip(actual, expected)):
if test_obj.atol[i] is None and test_obj.rtol[i] is None:
continue
n_failed = np.sum(~np.isfinite(a))
rms_dev = np.sqrt(np.sum((a - e)**2))
rms_dev_rel = np.sqrt(np.sum((a / e - 1)**2))
name = test_obj.names[i]
if name is None:
name = func.__name__ + ' ({})'.format(i)
else:
name = func.__name__ + ' ({})'.format(name)
fails = test_obj.fails[i]
if fails is None:
fails = 0
if n_failed > fails:
mesg = '{0:.0f}% of the tests in "{1}" failed'
_exceptions.append(mesg.format(n_failed/repeats*100, name))
if test_obj.atol[i] is not None:
if rms_dev > test_obj.atol[i]:
mesg = 'rms deviation in "{2}" is too large ({0} > {1})'
_exceptions.append(mesg.format(rms_dev, test_obj.atol[i],
name))
if test_obj.rtol[i] is not None:
mesg = 'rms rel. deviation in "{2}" is too large ({0} > {1})'
if rms_dev_rel > test_obj.rtol[i]:
_exceptions.append(mesg.format(rms_dev_rel,
test_obj.rtol[i], name))
res = pd.Series([n_failed, rms_dev, rms_dev_rel], name=name)
_result_table.append(res)
try:
result_table.extend(_result_table)
except NameError:
result_table = _result_table
if len(_exceptions) > 0:
raise AttributeError('\n'.join(_exceptions))
return wrapper
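# A minimal usage sketch (class, tolerances, and measure_radius() are invented
# for illustration; each repeated test returns an (actual, expected) pair):
#
#   class TestRadius(RepeatedUnitTests, unittest.TestCase):
#       repeats = 100
#       names = ['radius']
#       atol = [0.1]       # max allowed rms deviation
#       rtol = [None]
#       fails = [5]        # tolerated number of non-finite results
#
#       @repeat_check_std
#       def test_radius(self):
#           return measure_radius(), 10.0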
class RepeatedUnitTests(object):
N = 10
@classmethod
def setUpClass(cls):
global result_table
result_table = []
@classmethod
def tearDownClass(cls):
global result_table
results_table = pd.DataFrame(result_table)
results_table.columns = ['fails', 'rms_dev', 'rms_rel_dev']
print('Tests results from {}:'.format(cls.__name__))
print(results_table)
| {
"repo_name": "rbnvrw/circletracking",
"path": "circletracking/tests/common.py",
"copies": "1",
"size": "4479",
"license": "bsd-3-clause",
"hash": 4252565725663310000,
"line_mean": 35.7131147541,
"line_max": 77,
"alpha_frac": 0.4896182184,
"autogenerated": false,
"ratio": 3.999107142857143,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49887253612571425,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.utils import python_2_unicode_compatible
from ..exceptions import ExecuteMSQLError
from ..models import MemberSuiteObject
from ..memberships import services as membership_services
from ..organizations.models import Organization
from ..utils import convert_ms_object
def generate_username(membersuite_object):
"""Return a username suitable for storing in auth.User.username.
Has to be <= 30 characters long. (Until we drop support for
Django 1.4, after which we can define a custom User model with
a larger username field.)
We want to incorporate the membersuite_id in the username.
Those look like this:
6faf90e4-0032-c842-a28a-0b3c8b856f80
That's 36 characters, too long for username. We assume
those leading digits will always be there in
every ID; since they're not needed to generate a unique
value, they can go.
After chomping the intro, we're at 28 characters, so we
insert "ms" in the front.
"""
username = "ms" + membersuite_object.membersuite_id[len("6faf90e4"):]
return username
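# Worked example (the ID below is the illustrative one from the docstring):
#
#   '6faf90e4-0032-c842-a28a-0b3c8b856f80'                              36 chars
#   -> drop the leading len("6faf90e4") chars -> '-0032-...-0b3c8b856f80'  28 chars
#   -> prepend 'ms' -> 'ms-0032-c842-a28a-0b3c8b856f80'                  30 chars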
@python_2_unicode_compatible
class PortalUser(MemberSuiteObject):
def __init__(self, membersuite_object_data, session_id=None):
"""Create a PortalUser object from a the Zeep'ed XML representation of
a Membersuite PortalUser.
"""
super(PortalUser, self).__init__(
membersuite_object_data=membersuite_object_data)
self.email_address = self.fields["EmailAddress"]
self.first_name = self.fields["FirstName"]
self.last_name = self.fields["LastName"]
self.owner = self.fields["Owner"]
self.session_id = session_id
def __str__(self):
return ("<PortalUser: ID: {id}, email_address: {email_address}, "
"first_name: {first_name}, last_name: {last_name}, "
"owner: {owner}, session_id: {session_id}>".format(
id=self.membersuite_id,
email_address=self.email_address,
first_name=self.first_name,
last_name=self.last_name,
owner=self.owner,
session_id=self.session_id))
def get_individual(self, client):
"""Return the Individual that owns this PortalUser.
"""
if not client.session_id:
client.request_session()
object_query = ("SELECT OBJECT() FROM INDIVIDUAL "
"WHERE ID = '{}'".format(self.owner))
result = client.execute_object_query(object_query)
msql_result = result["body"]["ExecuteMSQLResult"]
if msql_result["Success"]:
membersuite_object_data = (msql_result["ResultValue"]
["SingleObject"])
else:
raise ExecuteMSQLError(result=result)
return Individual(membersuite_object_data=membersuite_object_data,
portal_user=self)
@python_2_unicode_compatible
class Individual(MemberSuiteObject):
def __init__(self, membersuite_object_data, portal_user=None):
"""Create an Individual object from the Zeep'ed XML representation of
a MemberSuite Individual.
"""
super(Individual, self).__init__(
membersuite_object_data=membersuite_object_data)
self.email_address = self.fields["EmailAddress"]
self.first_name = self.fields["FirstName"]
self.last_name = self.fields["LastName"]
self.title = self.fields["Title"]
self.primary_organization_id = (
self.fields["PrimaryOrganization__rtg"])
self.portal_user = portal_user
def __str__(self):
return ("<Individual: ID: {id}, email_address: {email_address}, "
"first_name: {first_name}, last_name: {last_name}>".format(
id=self.membersuite_id,
email_address=self.email_address,
first_name=self.first_name,
last_name=self.last_name))
@property
def phone_number(self):
numbers = self.fields["PhoneNumbers"]["MemberSuiteObject"]
if len(numbers):
for key_value_pair in (numbers[0]
["Fields"]["KeyValueOfstringanyType"]):
if key_value_pair["Key"] == "PhoneNumber":
return key_value_pair["Value"]
return None
def is_member(self, client):
"""Is this Individual a member?
Assumptions:
- a "primary organization" in MemberSuite is the "current"
Organization for an Individual
- get_memberships_for_org() returns Memberships ordered such
that the first one returned is the "current" one.
"""
if not client.session_id:
client.request_session()
primary_organization = self.get_primary_organization(client=client)
if primary_organization:
membership_service = membership_services.MembershipService(
client=client)
membership = membership_service.get_current_membership_for_org(
account_num=primary_organization.id)
if membership:
return membership.receives_member_benefits
else:
return False
def get_primary_organization(self, client):
"""Return the primary Organization for this Individual.
"""
if self.primary_organization_id is None:
return None
if not client.session_id:
client.request_session()
object_query = ("SELECT OBJECT() FROM ORGANIZATION "
"WHERE ID = '{}'".format(
self.primary_organization_id))
result = client.execute_object_query(object_query)
msql_result = result["body"]["ExecuteMSQLResult"]
if msql_result["Success"]:
membersuite_object_data = (msql_result["ResultValue"]
["SingleObject"])
else:
raise ExecuteMSQLError(result=result)
# Could omit this step if Organization inherits from MemberSuiteObject.
organization = convert_ms_object(
membersuite_object_data["Fields"]["KeyValueOfstringanyType"])
return Organization(org=organization)
| {
"repo_name": "AASHE/python-membersuite-api-client",
"path": "membersuite_api_client/security/models.py",
"copies": "1",
"size": "6476",
"license": "mit",
"hash": -5742916225707472000,
"line_mean": 34.3879781421,
"line_max": 79,
"alpha_frac": 0.6043854231,
"autogenerated": false,
"ratio": 4.3259853039412155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 183
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.utils import python_2_unicode_compatible
from ..models import MemberSuiteObject
from ..utils import get_new_client, value_for_key
from ..financial import services as financial_services
@python_2_unicode_compatible
class OrderLineItem(MemberSuiteObject):
def __init__(self, membersuite_object_data, session_id=None):
"""Create an OrderLineItem object from a the Zeep'ed XML
representation of a Membersuite OrderLineItem.
"""
membersuite_id = value_for_key(
membersuite_object_data=membersuite_object_data,
key="OrderLineItemID")
super(OrderLineItem, self).__init__(
membersuite_object_data=membersuite_object_data,
membersuite_id=membersuite_id)
self.product_id = self.fields["Product"]
self.session_id = session_id
def __str__(self):
return ("<Order: ID: {id}, product: {product} "
" session_id: {session_id}>".format(
id=self.membersuite_id,
product=self.product,
session_id=self.session_id))
def get_product(self, client=None):
"""Return a Product object for this line item.
"""
client = client or get_new_client(request_session=True)
if not client.session_id:
client.request_session()
product = financial_services.get_product(
membersuite_id=self.product_id,
client=client)
return product
@python_2_unicode_compatible
class Order(MemberSuiteObject):
def __init__(self, membersuite_object_data, session_id=None):
"""Create an Order object from a the Zeep'ed XML representation of
a Membersuite Order.
"""
super(Order, self).__init__(
membersuite_object_data=membersuite_object_data)
self.session_id = session_id
def __str__(self):
return ("<Order: ID: {id}, Line Items: {line_items} "
" session_id: {session_id}>".format(
id=self.membersuite_id,
line_items=self.line_items,
session_id=self.session_id))
@property
def line_items(self):
"""Returns the OrderLineItem objects for line items
in this order.
"""
membersuite_object_data = (
self.fields["LineItems"]["MemberSuiteObject"])
line_items = []
for datum in membersuite_object_data:
line_items.append(OrderLineItem(membersuite_object_data=datum))
return line_items
def get_products(self, client=None):
"""A list of Product objects in this Order.
"""
client = client or get_new_client(request_session=True)
if not client.session_id:
client.request_session()
products = []
for line_item in self.line_items:
products.append(line_item.get_product(client=client))
return products
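# A minimal usage sketch (assumes `data` is the Zeep'ed XML for an Order and
# `client` is a connected API client, per the constructors above):
#
#   order = Order(membersuite_object_data=data, session_id=client.session_id)
#   for item in order.line_items:
#       product = item.get_product(client=client)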
| {
"repo_name": "AASHE/python-membersuite-api-client",
"path": "membersuite_api_client/orders/models.py",
"copies": "1",
"size": "3066",
"license": "mit",
"hash": 2984745573632088000,
"line_mean": 33.8409090909,
"line_max": 75,
"alpha_frac": 0.6053489889,
"autogenerated": false,
"ratio": 4.109919571045577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 88
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.utils import python_2_unicode_compatible
@python_2_unicode_compatible
class MemberSuiteAPIError(Exception):
def __init__(self, result):
self.result = result
self.exception_type = self.__class__.__name__
def __str__(self):
concierge_error = self.get_concierge_error()
return "<{exception_type} ConciergeError: {concierge_error}>".format(
exception_type=self.exception_type,
concierge_error=concierge_error)
def get_concierge_error(self):
try:
return (self.result["body"][self.result_type]
["Errors"]["ConciergeError"])
except (KeyError, AttributeError):
return (self.result["Errors"])
class LoginToPortalError(MemberSuiteAPIError):
pass
class LogoutError(MemberSuiteAPIError):
pass
class ExecuteMSQLError(MemberSuiteAPIError):
pass
class NoResultsError(MemberSuiteAPIError):
pass
class NotAnObjectQuery(MemberSuiteAPIError):
pass
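# A minimal usage sketch (the calling code and its logger are assumptions;
# the string form comes from __str__ above):
#
#   try:
#       result = client.execute_object_query(query)
#   except ExecuteMSQLError as err:
#       logger.error(str(err))   # "<ExecuteMSQLError ConciergeError: ...>"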
| {
"repo_name": "AASHE/python-membersuite-api-client",
"path": "membersuite_api_client/exceptions.py",
"copies": "1",
"size": "1095",
"license": "mit",
"hash": -4950093015576821000,
"line_mean": 23.8863636364,
"line_max": 77,
"alpha_frac": 0.6566210046,
"autogenerated": false,
"ratio": 3.9388489208633093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.509546992546331,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .geom import geom
from scipy.stats import gaussian_kde
import numpy as np
class geom_density(geom):
DEFAULT_AES = {'alpha': None, 'color': 'black', 'fill': None,
'linetype': 'solid', 'size': 1.0, 'weight': None}
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'stat': 'density', 'position': 'identity'}
_extra_requires = {'y'}
_aes_renames = {'linetype': 'linestyle', 'size': 'linewidth',
'fill': 'facecolor'}
_units = {'alpha', 'color', 'facecolor', 'linestyle', 'linewidth'}
def _plot_unit(self, pinfo, ax):
x = pinfo.pop('x')
y = pinfo.pop('y')
# Only meant for the stat
del pinfo['weight']
# These do not apply to the line
_alpha = pinfo.pop('alpha')
_fc = pinfo.pop('facecolor')
ax.plot(x, y, **pinfo)
if _fc not in (None, False):
_c = pinfo.pop('color')
pinfo.pop('linewidth')
pinfo['alpha'] = _alpha
pinfo['facecolor'] = _c if _fc is True else _fc
ax.fill_between(x, y1=np.zeros(len(x)), y2=y, **pinfo)
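# A minimal usage sketch (the column name and fill/alpha values are
# assumptions): passing fill=True reuses the line color as the face color,
# as handled in _plot_unit above.
#
#   ggplot(df, aes(x='value')) + geom_density(fill=True, alpha=0.3)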
| {
"repo_name": "bitemyapp/ggplot",
"path": "ggplot/geoms/geom_density.py",
"copies": "12",
"size": "1225",
"license": "bsd-2-clause",
"hash": 2265743410668405500,
"line_mean": 32.1081081081,
"line_max": 70,
"alpha_frac": 0.5355102041,
"autogenerated": false,
"ratio": 3.441011235955056,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9976521440055056,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .geom import geom
class geom_hline(geom):
DEFAULT_AES = {'color': 'black', 'linetype': 'solid',
'size': 1.0, 'alpha': None, 'y': None,
'xmin': None, 'xmax': None}
REQUIRED_AES = {'yintercept'}
DEFAULT_PARAMS = {'stat': 'hline', 'position': 'identity',
'show_guide': False}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
_units = {'alpha'}
def _plot_unit(self, pinfo, ax):
del pinfo['y']
xmin = pinfo.pop('xmin')
if xmin is None:
xmin, _ = ax.get_xlim()
xmax = pinfo.pop('xmax')
if xmax is None:
_, xmax = ax.get_xlim()
y = pinfo.pop('yintercept')
# TODO: if y is not the same length as
# the other aesthetics, default aesthetics
# should be for the array-like aesthetics
ax.hlines(y, xmin, xmax, **pinfo)
| {
"repo_name": "mizzao/ggplot",
"path": "ggplot/geoms/geom_hline.py",
"copies": "12",
"size": "1035",
"license": "bsd-2-clause",
"hash": 1622467416635078000,
"line_mean": 32.3870967742,
"line_max": 66,
"alpha_frac": 0.5304347826,
"autogenerated": false,
"ratio": 3.6062717770034842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .geom import geom
class geom_vline(geom):
DEFAULT_AES = {'color': 'black', 'linetype': 'solid',
'size': 1.0, 'alpha': None, 'x': None,
'ymin': None, 'ymax': None}
REQUIRED_AES = {'xintercept'}
DEFAULT_PARAMS = {'stat': 'vline', 'position': 'identity',
'show_guide': False}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
_units = {'alpha'}
def _plot_unit(self, pinfo, ax):
del pinfo['x']
ymin = pinfo.pop('ymin')
if ymin is None:
ymin, _ = ax.get_ylim()
ymax = pinfo.pop('ymax')
if ymax is None:
_, ymax = ax.get_ylim()
x = pinfo.pop('xintercept')
# TODO: if x is not the same length as
# the other aesthetics, default aesthetics
# should be for the array-like aesthetics
# problem illustrated by:
# gg = ggplot(aes(x="x", y="y", shape="cat2",
# color="cat"), data=df)
# gg + geom_point() + geom_vline(xintercept=40)
# vertical line should be black
#
# This is probably a good test for handling
# aesthetics properly along the whole pipeline.
# The problem should clear up when that is the case,
# and the above code should be added as a test case
ax.vlines(x, ymin, ymax, **pinfo)
| {
"repo_name": "benslice/ggplot",
"path": "ggplot/geoms/geom_vline.py",
"copies": "12",
"size": "1496",
"license": "bsd-2-clause",
"hash": 8351964687573554000,
"line_mean": 35.487804878,
"line_max": 66,
"alpha_frac": 0.5414438503,
"autogenerated": false,
"ratio": 3.6666666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..ggplot import ggplot
from ..aes import aes
class geom(object):
_aes_renames = {}
DEFAULT_AES = {}
REQUIRED_AES = {}
def __init__(self, *args, **kwargs):
self.layers = [self]
self.params = kwargs
self.geom_aes = None
if len(args) > 0:
if isinstance(args[0], aes):
self.geom_aes = args[0]
self.VALID_AES = set()
self.VALID_AES.update(self.DEFAULT_AES.keys())
self.VALID_AES.update(self.REQUIRED_AES)
self.VALID_AES.update(self._aes_renames.keys())
def __radd__(self, gg):
if isinstance(gg, ggplot):
gg.layers += self.layers
return gg
self.layers.append(gg)
return self
def _rename_parameters(self, params):
pass
def _update_data(self, data, _aes):
if 'mapping' in self.params:
_aes = self.params['mapping']
if 'data' not in self.params:
data = _aes._evaluate_expressions(data)
data = _aes.handle_identity_values(data)
if 'data' in self.params:
data = _aes._evaluate_expressions(self.params['data'])
data = _aes.handle_identity_values(data)
return (data, _aes)
def _get_plot_args(self, data, _aes):
mpl_params = {}
mpl_params.update(self.DEFAULT_AES)
# handle the case that the geom has overriding aes passed as an argument
if self.geom_aes:
_aes.update(self.geom_aes)
# for non-continuous values (i.e. shape), need to only pass 1 value
# into matplotlib. for example, instead of ['+', '+', '+', ..., '+'] you'd
# want to pass in '+'
for key, value in _aes.items():
if value not in data:
mpl_params[key] = value
elif data[value].nunique() == 1:
mpl_params[key] = data[value].iloc[0]
else:
mpl_params[key] = data[value]
# parameters passed to the geom itself override the aesthetics
mpl_params.update(self.params)
items = list(mpl_params.items())
for key, value in items:
if key not in self.VALID_AES:
del mpl_params[key]
elif key in self._aes_renames:
new_key = self._aes_renames[key]
mpl_params[new_key] = value
del mpl_params[key]
for req in self.REQUIRED_AES:
if req not in mpl_params:
raise Exception("%s needed for %s" % (req, str(self)))
else:
del mpl_params[req]
for key, value in self.DEFAULT_PARAMS.items():
if key not in self.params:
self.params[key] = value
return mpl_params
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/geoms/geom.py",
"copies": "1",
"size": "2915",
"license": "bsd-2-clause",
"hash": 391765440212007300,
"line_mean": 31.3888888889,
"line_max": 81,
"alpha_frac": 0.5313893654,
"autogenerated": false,
"ratio": 3.912751677852349,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4944141043252349,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .handlers_base import HandlerBase
import errno
import six
import logging
import numpy as np
import uuid
import os
import os.path as op
import datetime
import filestore.api as fsc
from .utils import _make_sure_path_exists
logger = logging.getLogger(__name__)
class NpyWriter(HandlerBase):
"""
Class to handle writing a numpy array out to disk and registering
that write with FileStore.
This class is only good for one call to add_data.
Parameters
----------
fpath : str
Path (including filename) of where to save the file
resource_kwargs : dict, optional
Saved in the resource_kwargs field of the fileBase document. Valid
keys are {mmap_mode, }
"""
SPEC_NAME = 'npy'
def __init__(self, fpath, resource_kwargs=None):
if op.exists(fpath):
raise IOError("the requested file {fpath} already exist")
self._fpath = fpath
if resource_kwargs is None:
resource_kwargs = dict()
for k in resource_kwargs.keys():
if k != 'mmap_mode':
raise ValueError("The only valid resource_kwargs key is "
"'mmap_mode' "
"you passed in {}".format(k))
self._f_custom = dict(resource_kwargs)
self._writable = True
def add_data(self, data, uid=None, resource_kwargs=None):
"""
Parameters
----------
data : ndarray
The data to save
uid : str, optional
The uid to be used for this entry,
if not given use uuid1 to generate one
resource_kwargs : None, optional
Currently raises if not 'falsy' and is ignored.
Returns
-------
uid : str
The uid used to register this data with filestore, can
be used to retrieve it
"""
if not self._writable:
raise RuntimeError("This writer can only write one data entry "
"and has already been used")
if resource_kwargs:
raise ValueError("This writer does not support resource_kwargs")
if op.exists(self._fpath):
raise IOError("the requested file {fpath} "
"already exist".format(fpath=self._fpath))
if uid is None:
uid = str(uuid.uuid1())
np.save(self._fpath, np.asanyarray(data))
self._writable = False
fb = fsc.insert_resource(self.SPEC_NAME, self._fpath, self._f_custom)
evl = fsc.insert_datum(fb, uid, {})
return evl.datum_id
def save_ndarray(data, base_path=None, filename=None):
"""
Helper method to mindlessly save a numpy array to disk.
Defaults to saving files in ``$XDG_DATA_HOME/fs_cache/YYYY-MM-DD``
(typically ``~/.local/share/fs_cache/YYYY-MM-DD``)
Parameters
----------
data : ndarray
The data to be saved
base_path : str, optional
The base-path to use for saving files. If not given,
defaults to ``$XDG_DATA_HOME/fs_cache``. Will add a sub-directory for
each day in this path.
filename : str, optional
The name of the file to save to disk. Defaults to a uuid4 if none is
given
"""
if base_path is None:
xdg_data = os.getenv('XDG_DATA_HOME')
if not xdg_data:
xdg_data = op.join(op.expanduser('~'), '.local', 'share')
base_path = op.join(xdg_data, 'fs_cache',
str(datetime.date.today()))
if filename is None:
filename = str(uuid.uuid4())
_make_sure_path_exists(base_path)
fpath = op.join(base_path, filename + '.npy')
with NpyWriter(fpath) as fout:
eid = fout.add_data(data)
return eid
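# A minimal usage sketch (assumes a configured filestore; retrieval via
# filestore.api is an assumption and is omitted here):
#
#   import numpy as np
#   datum_id = save_ndarray(np.arange(10))
#   # the file lands under $XDG_DATA_HOME/fs_cache/YYYY-MM-DD/<uuid>.npy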
| {
"repo_name": "ericdill/fileStore",
"path": "filestore/file_writers.py",
"copies": "2",
"size": "3826",
"license": "bsd-3-clause",
"hash": -5846959921100458000,
"line_mean": 28.4307692308,
"line_max": 77,
"alpha_frac": 0.5776267642,
"autogenerated": false,
"ratio": 4.1814207650273225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 130
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from hashlib import sha1
from sqlalchemy import Column, ForeignKey, types, orm
from sqlalchemy.ext.declarative import declarative_base
from ..mixin import ElasticMixin, ESMapping, ESString, ESField
Base = declarative_base()
class Genre(Base, ElasticMixin):
__tablename__ = 'genres'
id = Column(types.String(40), primary_key=True)
title = Column(types.Unicode(40))
def __init__(self, *args, **kwargs):
Base.__init__(self, *args, **kwargs)
self.id = sha1(self.title.encode('utf-8')).hexdigest()
@classmethod
def elastic_mapping(cls):
return ESMapping(
analyzer='content',
properties=ESMapping(
ESString('title', boost=5.0)))
class Movie(Base, ElasticMixin):
__tablename__ = 'movies'
id = Column(types.String(40), primary_key=True)
title = Column(types.Unicode(40))
director = Column(types.Unicode(40))
year = Column(types.Integer)
rating = Column(types.Numeric)
genre_id = Column(None, ForeignKey('genres.id'))
genre = orm.relationship('Genre')
__elastic_parent__ = ('Genre', 'genre_id')
def __init__(self, *args, **kwargs):
Base.__init__(self, *args, **kwargs)
self.id = sha1(self.title.encode('utf-8')).hexdigest()
@property
def genre_title(self):
return self.genre and self.genre.title or ''
@classmethod
def elastic_mapping(cls):
return ESMapping(
analyzer='content',
properties=ESMapping(
ESString('title', boost=5.0),
ESString('director'),
ESField('year'),
ESField('rating'),
ESString('genre_title', analyzer='lowercase')))
class Unindexed(Base):
# Does not inherit from ElasticMixin.
__tablename__ = 'unindexed'
id = Column(types.Integer, primary_key=True)
def get_data():
mystery = Genre(title=u'Mystery')
comedy = Genre(title=u'Comedy')
action = Genre(title=u'Action')
drama = Genre(title=u'Drama')
genres = [mystery, comedy, action, drama]
movies = [
Movie(
title=u'To Catch a Thief',
director=u'Alfred Hitchcock',
year=1955,
rating=7.5,
genre=mystery,
genre_id=mystery.id,
),
Movie(
title=u'Vertigo',
director=u'Alfred Hitchcock',
year=1958,
rating=8.5,
genre=mystery,
genre_id=mystery.id,
),
Movie(
title=u'North by Northwest',
director=u'Alfred Hitchcock',
year=1959,
rating=8.5,
genre=mystery,
genre_id=mystery.id,
),
Movie(
title=u'Destination Tokyo',
director=u'Delmer Daves',
year=1943,
rating=7.1,
genre=action,
genre_id=action.id,
),
Movie(
title=u'Annie Hall',
director=u'Woody Allen',
year=1977,
rating=8.2,
genre=comedy,
genre_id=comedy.id,
),
Movie(
title=u'Sleeper',
director=u'Woody Allen',
year=1973,
rating=7.3,
genre=comedy,
genre_id=comedy.id,
),
Movie(
title=u'Captain Blood',
director=u'Michael Curtiz',
year=1935,
rating=7.8,
genre=action,
genre_id=action.id,
),
Movie(
title=u'Metropolis',
director=u'Fritz Lang',
year=1927,
rating=8.4,
genre=drama,
genre_id=drama.id,
)]
return genres, movies
| {
"repo_name": "storborg/pyramid_es",
"path": "pyramid_es/tests/data.py",
"copies": "1",
"size": "3879",
"license": "mit",
"hash": -2813141910894979600,
"line_mean": 26.1258741259,
"line_max": 66,
"alpha_frac": 0.5287445218,
"autogenerated": false,
"ratio": 3.6354264292408622,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46641709510408624,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from importlib import import_module
from inspect import getmembers, isclass
import six
from .backends.base import BaseBackend
def parse_settings(settings, prefix='gimlet.'):
"""Parse settings and return options.
``settings`` is a dict that contains options for
:func:`.factories.session_factory_factory`. Settings that
don't start with ``prefix`` will be ignored. As a convenience, some
of the options in ``settings`` may be specified as strings.
All of the boolean options can be passed as strings, which will be
parsed by :func:`asbool`.
If `backend` is a string, it must be the name of a module containing
a subclass of :class:`.backends.base.BaseBackend`. If the name
contains one or more dots, it will be considered absolute;
otherwise, it will be considered relative to :mod:`.backends`.
"""
options = {}
bool_options = ('clientside', 'permanent')
for k, v in settings.items():
if k.startswith(prefix):
k = k[len(prefix):]
if k in bool_options:
v = asbool(v)
options[k] = v
if 'secret' not in options:
raise ValueError('secret is required')
if 'backend' in options and options['backend'] is not None:
backend = options['backend']
if isinstance(backend, six.string_types):
predicate = lambda m: (
isclass(m) and
issubclass(m, BaseBackend) and
(m is not BaseBackend))
module_name = backend
if '.' not in module_name:
module_name = 'gimlet.backends.' + backend
backend_module = import_module(module_name)
backend_cls = getmembers(backend_module, predicate)[0][1]
options['backend'] = backend_cls
backend = options['backend']
if not (isclass(backend) and issubclass(backend, BaseBackend)):
raise ValueError('backend must be a subclass of BaseBackend')
backend_cls = options.get('backend')
if backend_cls is not None:
backend_options = {}
for k in list(options.keys()):
if k.startswith('backend.'):
backend_options[k[8:]] = options.pop(k)
options['backend'] = options['backend'](**backend_options)
return options
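# A minimal sketch (setting values are assumptions; keys without the
# 'gimlet.' prefix are ignored and booleans go through asbool() below):
#
#   options = parse_settings({
#       'gimlet.secret': 's3krit',
#       'gimlet.permanent': 'yes',
#       'unrelated.key': 'ignored',
#   })
#   # -> {'secret': 's3krit', 'permanent': True}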
def asbool(s):
"""Convert value to bool. Copied from pyramid.settings."""
if s is None:
return False
if isinstance(s, bool):
return s
s = str(s).strip()
return s.lower() in ('t', 'true', 'y', 'yes', 'on', '1')
| {
"repo_name": "storborg/gimlet",
"path": "gimlet/util.py",
"copies": "1",
"size": "2659",
"license": "mit",
"hash": -6976359520701304000,
"line_mean": 36.4507042254,
"line_max": 73,
"alpha_frac": 0.611884167,
"autogenerated": false,
"ratio": 4.240829346092504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5352713513092504,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .. import QtCore, QtGui
import six
import sys
import logging
logger = logging.getLogger(__name__)
_defaults = {
"expanded": False,
}
class DisplayDict(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setWindowTitle('Display dictionary example')
self._display_dict = RecursiveTreeWidget()
self.setCentralWidget(self._display_dict)
def set_tree(self, tree):
self._display_dict.fill_widget(tree)
class RecursiveTreeWidget(QtGui.QTreeWidget):
"""
    Widget that recursively adds a list, dictionary, or nested dictionary
    to a tree widget.
Notes
-----
fill_item and fill_widget were taken from:
http://stackoverflow.com/questions/21805047/qtreewidget-to-mirror-python-dictionary
"""
def __init__(self):
QtGui.QTreeWidget.__init__(self)
self.itemClicked.connect(self.who_am_i)
def fill_item(self, node, obj, node_name=None):
node.setExpanded(_defaults["expanded"])
if isinstance(obj, dict):
# the object is a dictionary
for k, v in sorted(six.iteritems(obj)):
dict_child = QtGui.QTreeWidgetItem()
dict_child.setText(0, six.text_type(k))
self.add_child(node, dict_child)
self.fill_item(dict_child, v)
elif isinstance(obj, list):
for v in obj:
list_child = QtGui.QTreeWidgetItem()
self.add_child(node, list_child)
if type(v) is dict:
list_child.setText(0, '[dict]')
self.fill_item(list_child, v)
elif type(v) is list:
list_child.setText(0, '[list]')
self.fill_item(list_child, v)
else:
list_child.setText(0, six.text_type(v))
list_child.setExpanded(_defaults["expanded"])
else:
child = QtGui.QTreeWidgetItem()
if node_name is None:
node_name = obj
child.setText(0, six.text_type(node_name))
self.add_child(node, child)
def add_child(self, node, child):
"""
Add a leaf to the tree at the 'node' position
Parameters
----------
node : QtGui.QTreeWidgetItem()
child : QtGui.QTreeWidgetItem()
"""
node.addChild(child)
def find_root(self, node=None):
""" = =
find the node whose parent is the invisible root item
Parameters
----------
node : QtGui.QTreeWidgetItem, optional
The node whose top level parent you wish to find
Defaults to the currently selected node
Returns
-------
path_to_node : list
list of keys
dict_idx : int
Index of the currently selected search result
"""
if node is None:
node = self._current_selection
path_to_node = []
try:
# get the parent node to track the two levels independently
while True:
path_to_node.insert(0, node.text(0))
# move up the tree
node = node.parent()
except AttributeError:
# this will be thrown when the node is one of the search results
currentIndex = self.currentIndex()
dict_idx = currentIndex.row()
logger.debug("dict_idx: {0}".format(dict_idx))
return path_to_node, dict_idx
def who_am_i(self, obj):
self._current_selection = obj
logger.debug(obj.text(0))
def fill_widget(self, obj):
"""
        Clear the tree, then recursively fill it from 'obj' via fill_item()
Parameters
----------
obj : list, dict or obj
"""
self.clear()
self.fill_item(self.invisibleRootItem(), obj)
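
# Editorial sketch, not part of the original file: a self-contained demo
# that fills the widget from a hard-coded nested dict, avoiding the
# metadataStore dependency used by the __main__ block below.
def _demo_display_dict():
    app = QtGui.QApplication(sys.argv)
    dd = DisplayDict()
    dd.set_tree({'scan': {'id': 42, 'motors': ['x', 'y']},
                 'notes': ['aligned', 'dark frame taken']})
    dd.show()
    sys.exit(app.exec_())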
if __name__ == "__main__":
from metadataStore.userapi.commands import search
def gen_tree():
query = {"owner": "edill", "data": True}
return search(**query)
app = QtGui.QApplication(sys.argv)
dd = DisplayDict()
dd.set_tree(gen_tree())
dd.show()
sys.exit(app.exec_())
| {
"repo_name": "licode/xray-vision",
"path": "xray_vision/qt_widgets/displaydict.py",
"copies": "6",
"size": "4397",
"license": "bsd-3-clause",
"hash": 7535762434622688000,
"line_mean": 29.324137931,
"line_max": 87,
"alpha_frac": 0.5549238117,
"autogenerated": false,
"ratio": 4.187619047619048,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 145
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.dates import DateFormatter
from matplotlib.dates import MinuteLocator, HourLocator, DayLocator
from matplotlib.dates import WeekdayLocator, MonthLocator, YearLocator
def date_format(format='%Y-%m-%d', tz=None):
"""
Format dates
Parameters
----------
format:
Date format using standard strftime format.
tz:
Instance of datetime.tzinfo
Examples
--------
>>> date_format('%b-%y')
>>> date_format('%B %d, %Y')
"""
return DateFormatter(format, tz)
def parse_break_str(txt):
"parses '10 weeks' into tuple (10, week)."
txt = txt.strip()
if len(txt.split()) == 2:
n, units = txt.split()
else:
        n, units = 1, txt
units = units.rstrip('s') # e.g. weeks => week
n = int(n)
return n, units
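# Editorial examples, not in the original source:
#   parse_break_str('10 weeks') -> (10, 'week')
#   parse_break_str('month')    -> (1, 'month')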
# matplotlib's YearLocator uses different named
# arguments than the others
LOCATORS = {
'minute': MinuteLocator,
'hour': HourLocator,
'day': DayLocator,
'week': WeekdayLocator,
'month': MonthLocator,
'year': lambda interval: YearLocator(base=interval)
}
def date_breaks(width):
"""
Regularly spaced dates
Parameters
----------
width:
        An interval specification; must be one of [minute, hour, day, week,
        month, year], optionally preceded by a count, e.g. '2 weeks'.
Examples
--------
>>> date_breaks(width = '1 year')
>>> date_breaks(width = '6 weeks')
>>> date_breaks('months')
"""
period, units = parse_break_str(width)
Locator = LOCATORS.get(units)
locator = Locator(interval=period)
return locator
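
# Editorial sketch, not part of the original module: wiring the two helpers
# above onto a matplotlib Axes. 'ax' is assumed to be an Axes whose x values
# are dates; the width and format strings are arbitrary examples.
def _apply_date_scale(ax, width='6 weeks', fmt='%b %Y'):
    """Attach a locator/formatter pair built from the helpers above."""
    ax.xaxis.set_major_locator(date_breaks(width))
    ax.xaxis.set_major_formatter(date_format(fmt))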
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/date_utils.py",
"copies": "1",
"size": "1661",
"license": "bsd-2-clause",
"hash": 2517925686804399600,
"line_mean": 24.5538461538,
"line_max": 88,
"alpha_frac": 0.6092715232,
"autogenerated": false,
"ratio": 3.80091533180778,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9890947062510324,
"avg_score": 0.0038479584994913687,
"num_lines": 65
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import os
import re
import signal
import sys
from matplotlib.externals.six import unichr
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import FigureManagerBase
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2
from matplotlib.backend_bases import cursors
from matplotlib.backend_bases import TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
try:
import matplotlib.backends.qt_editor.figureoptions as figureoptions
except ImportError:
figureoptions = None
from .qt_compat import QtCore, QtGui, QtWidgets, _getSaveFileName, __version__
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
backend_version = __version__
# SPECIAL_KEYS are keys that do *not* return their unicode name
# instead they have manually specified names
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
# Define which modifier keys are collected on keyboard events.
# Elements are (mpl name, Modifier Flag, Qt Key) tuples.
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
# in OSX, the control and super (aka cmd/apple) keys are switched, so
# switch them back.
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'super', # cmd/apple key
QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('super', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
def fn_name():
return sys._getframe(1).f_code.co_name
DEBUG = False
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
}
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
# placeholder for the global QApplication instance
qApp = None
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
global qApp
if qApp is None:
if DEBUG:
print("Starting up QApplication")
app = QtWidgets.QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
if hasattr(QtGui, "QX11Info"):
display = os.environ.get('DISPLAY')
                if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
qApp = QtWidgets.QApplication([str(" ")])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
class Show(ShowBase):
def mainloop(self):
# allow KeyboardInterrupt exceptions to close the plot window.
signal.signal(signal.SIGINT, signal.SIG_DFL)
global qApp
qApp.exec_()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
thisFig = Figure(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQT(figure)
manager = FigureManagerQT(canvas, num)
return manager
class TimerQT(TimerBase):
'''
    Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timeout() signal to the
# _on_timer method.
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._timer_set_interval()
def __del__(self):
        # Probably not necessary in practice, but it is good practice to
        # disconnect the signal explicitly
try:
TimerBase.__del__(self)
self._timer.timeout.disconnect(self._on_timer)
except RuntimeError:
# Timer C++ object already deleted
pass
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
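
# Editorial note, not in the original source: a TimerQT is normally obtained
# through FigureCanvasQT.new_timer() rather than constructed directly, e.g.
#
#     timer = canvas.new_timer(interval=500)  # 'canvas' is hypothetical
#     timer.add_callback(canvas.draw_idle)
#     timer.start()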
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQt qt5: ', figure)
_create_qApp()
# NB: Using super for this call to avoid a TypeError:
# __init__() takes exactly 2 arguments (1 given) on QWidget
# PyQt5
# The need for this change is documented here
# http://pyqt.sourceforge.net/Docs/PyQt5/pyqt4_differences.html#cooperative-multi-inheritance
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
self.setMouseTracking(True)
w, h = self.get_width_height()
self.resize(w, h)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, guiEvent=event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, guiEvent=event)
def mousePressEvent(self, event):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button,
guiEvent=event)
if DEBUG:
print('button pressed:', event.button())
def mouseDoubleClickEvent(self, event):
x = event.pos().x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.pos().y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True,
guiEvent=event)
if DEBUG:
print('button doubleclicked:', event.button())
def mouseMoveEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
# if DEBUG: print('mouse move')
def mouseReleaseEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button,
guiEvent=event)
if DEBUG:
print('button released')
def wheelEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
# from QWheelEvent::delta doc
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
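            # angleDelta() is in eighths of a degree; a standard wheel
            # notch is 15 degrees, so one notch reports 120 -> one step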
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps != 0:
FigureCanvasBase.scroll_event(self, x, y, steps, guiEvent=event)
if DEBUG:
            print('scroll event: angleDelta = %i, '
                  'steps = %i ' % (event.angleDelta().y(), steps))
def keyPressEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
if DEBUG:
print('key press', key)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is None:
return
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
if DEBUG:
print('key release', key)
def resizeEvent(self, event):
w = event.size().width()
h = event.size().height()
if DEBUG:
print('resize (%d x %d)' % (w, h))
print("FigureCanvasQt.resizeEvent(%d, %d)" % (w, h))
dpival = self.figure.dpi
winch = w / dpival
hinch = h / dpival
self.figure.set_size_inches(winch, hinch)
FigureCanvasBase.resize_event(self)
self.draw_idle()
QtWidgets.QWidget.resizeEvent(self, event)
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
    def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def _get_key(self, event):
if event.isAutoRepeat():
return None
event_key = event.key()
event_mods = int(event.modifiers()) # actually a bitmask
# get names of the pressed modifier keys
# bit twiddling to pick out modifier keys from event_mods bitmask,
# if event_key is a MODIFIER, it should not be duplicated in mods
mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
if event_key != qt_key and (event_mods & mod_key) == mod_key]
try:
# for certain keys (enter, left, backspace, etc) use a word for the
# key, rather than unicode
key = SPECIAL_KEYS[event_key]
except KeyError:
# unicode defines code points up to 0x0010ffff
            # Qt will use Key codes larger than that for keyboard keys that
            # are not unicode characters (like multimedia keys)
# skip these
# if you really want them, you should add them to SPECIAL_KEYS
MAX_UNICODE = 0x10ffff
if event_key > MAX_UNICODE:
return None
key = unichr(event_key)
# qt delivers capitalized letters. fix capitalization
# note that capslock is ignored
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
mods.reverse()
return '+'.join(mods + [key])
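    # Worked example (editorial comment, not in the original source):
    # Ctrl+Shift+S arrives as Qt.Key_S with the control and shift modifier
    # bits set. SPECIAL_KEYS misses, so key = 'S'; 'shift' is consumed to
    # keep the capital letter, leaving mods == ['ctrl'] and a final string
    # of 'ctrl+S'. Plain Ctrl+S lowercases the letter: 'ctrl+s'.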
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting
periodic events through the backend's native event
loop. Implemented only for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs)
will be executed by the timer every *interval*.
"""
return TimerQT(*args, **kwargs)
def flush_events(self):
global qApp
qApp.processEvents()
def start_event_loop(self, timeout):
FigureCanvasBase.start_event_loop_default(self, timeout)
start_event_loop.__doc__ = \
FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__ = FigureCanvasBase.stop_event_loop_default.__doc__
class MainWindow(QtWidgets.QMainWindow):
closing = QtCore.Signal()
def closeEvent(self, event):
self.closing.emit()
QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow
"""
def __init__(self, canvas, num):
if DEBUG:
print('FigureManagerQT.%s' % fn_name())
FigureManagerBase.__init__(self, canvas, num)
self.canvas = canvas
self.window = MainWindow()
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self._widgetclosed)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
self.window.setWindowIcon(QtGui.QIcon(image))
        # Give the keyboard focus to the figure instead of the manager:
        # StrongFocus accepts both tab and click to focus, which enables
        # the canvas to process events without being clicked first, while
        # ClickFocus only takes the focus if the window has been clicked on.
        # See http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
self.canvas.setFocus()
self.window._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.window)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
self.toolbar.message.connect(self._show_message)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# add text label to status bar
self.statusbar_label = QtWidgets.QLabel()
self.window.statusBar().addWidget(self.statusbar_label)
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
@QtCore.Slot()
def _show_message(self, s):
self.statusbar_label.setText(s)
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _widgetclosed(self):
if self.window._destroying:
return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QtWidgets.QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
self.window.destroyed.connect(self._widgetclosed)
if self.toolbar:
self.toolbar.destroy()
if DEBUG:
print("destroy figure manager")
self.window.close()
def get_window_title(self):
return six.text_type(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
message = QtCore.Signal(str)
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.parent = parent
self.coordinates = coordinates
self._actions = {}
"""A mapping of toolitem method names to their QActions"""
QtWidgets.QToolBar.__init__(self, parent)
NavigationToolbar2.__init__(self, canvas)
def _icon(self, name):
return QtGui.QIcon(os.path.join(self.basedir, name))
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.addSeparator()
else:
a = self.addAction(self._icon(image_file + '.png'),
text, getattr(self, callback))
self._actions[callback] = a
if callback in ['zoom', 'pan']:
a.setCheckable(True)
if tooltip_text is not None:
a.setToolTip(tooltip_text)
if figureoptions is not None:
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit curves line and axes parameters')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtWidgets.QLabel("", self)
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.locLabel.setSizePolicy(
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
if figureoptions is not None:
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if not allaxes:
QtWidgets.QMessageBox.warning(
self.parent, "Error", "There are no axes to edit.")
return
if len(allaxes) == 1:
axes = allaxes[0]
else:
titles = []
for axes in allaxes:
title = axes.get_title()
ylabel = axes.get_ylabel()
label = axes.get_label()
if title:
fmt = "%(title)s"
if ylabel:
fmt += ": %(ylabel)s"
fmt += " (%(axes_repr)s)"
elif ylabel:
fmt = "%(axes_repr)s (%(ylabel)s)"
elif label:
fmt = "%(axes_repr)s (%(label)s)"
else:
fmt = "%(axes_repr)s"
titles.append(fmt % dict(title=title,
ylabel=ylabel, label=label,
axes_repr=repr(axes)))
item, ok = QtWidgets.QInputDialog.getItem(
self.parent, 'Customize', 'Select axes:', titles, 0, False)
if ok:
axes = allaxes[titles.index(six.text_type(item))]
else:
return
figureoptions.figure_edit(axes, self)
def _update_buttons_checked(self):
# sync button checkstates to match active mode
self._actions['pan'].setChecked(self._active == 'PAN')
self._actions['zoom'].setChecked(self._active == 'ZOOM')
def pan(self, *args):
super(NavigationToolbar2QT, self).pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super(NavigationToolbar2QT, self).zoom(*args)
self._update_buttons_checked()
def dynamic_update(self):
self.canvas.draw_idle()
def set_message(self, s):
self.message.emit(s)
if self.coordinates:
self.locLabel.setText(s)
def set_cursor(self, cursor):
if DEBUG:
print('Set cursor', cursor)
self.canvas.setCursor(cursord[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
        rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
def configure_subplots(self):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
dia = SubplotToolQt(self.canvas.figure, self.parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = list(six.iteritems(filetypes))
sorted_filetypes.sort()
default_filetype = self.canvas.get_default_filetype()
startpath = matplotlib.rcParams.get('savefig.directory', '')
startpath = os.path.expanduser(startpath)
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname, filter = _getSaveFileName(self.parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
if startpath == '':
# explicitly missing key or empty str signals to use cwd
matplotlib.rcParams['savefig.directory'] = startpath
else:
# save dir for next time
savefig_dir = os.path.dirname(six.text_type(fname))
matplotlib.rcParams['savefig.directory'] = savefig_dir
try:
self.canvas.print_figure(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(SubplotTool, UiSubplotTool):
def __init__(self, targetfig, parent):
UiSubplotTool.__init__(self, None)
self.targetfig = targetfig
self.parent = parent
self.donebutton.clicked.connect(self.close)
self.resetbutton.clicked.connect(self.reset)
self.tightlayout.clicked.connect(self.functight)
# constraints
self.sliderleft.valueChanged.connect(self.sliderright.setMinimum)
self.sliderright.valueChanged.connect(self.sliderleft.setMaximum)
self.sliderbottom.valueChanged.connect(self.slidertop.setMinimum)
self.slidertop.valueChanged.connect(self.sliderbottom.setMaximum)
self.defaults = {}
for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ):
self.defaults[attr] = getattr(self.targetfig.subplotpars, attr)
slider = getattr(self, 'slider' + attr)
slider.setMinimum(0)
slider.setMaximum(1000)
slider.setSingleStep(5)
slider.valueChanged.connect(getattr(self, 'func' + attr))
self._setSliderPositions()
def _setSliderPositions(self):
for attr in ('left', 'bottom', 'right', 'top', 'wspace', 'hspace', ):
slider = getattr(self, 'slider' + attr)
slider.setSliderPosition(int(self.defaults[attr] * 1000))
def funcleft(self, val):
if val == self.sliderright.value():
val -= 1
val /= 1000.
self.targetfig.subplots_adjust(left=val)
self.leftvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funcright(self, val):
if val == self.sliderleft.value():
val += 1
val /= 1000.
self.targetfig.subplots_adjust(right=val)
self.rightvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funcbottom(self, val):
if val == self.slidertop.value():
val -= 1
val /= 1000.
self.targetfig.subplots_adjust(bottom=val)
self.bottomvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def functop(self, val):
if val == self.sliderbottom.value():
val += 1
val /= 1000.
self.targetfig.subplots_adjust(top=val)
self.topvalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funcwspace(self, val):
val /= 1000.
self.targetfig.subplots_adjust(wspace=val)
self.wspacevalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def funchspace(self, val):
val /= 1000.
self.targetfig.subplots_adjust(hspace=val)
self.hspacevalue.setText("%.2f" % val)
if self.drawon:
self.targetfig.canvas.draw()
def functight(self):
self.targetfig.tight_layout()
self._setSliderPositions()
self.targetfig.canvas.draw()
def reset(self):
self.targetfig.subplots_adjust(**self.defaults)
self._setSliderPositions()
self.targetfig.canvas.draw()
def error_msg_qt(msg, parent=None):
if not is_string_like(msg):
msg = ','.join(map(str, msg))
QtWidgets.QMessageBox.warning(None, "Matplotlib",
                                  msg, QtWidgets.QMessageBox.Ok)
def exception_handler(type, value, tb):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename is not None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror is not None:
msg += value.strerror
else:
msg += six.text_type(value)
if len(msg):
error_msg_qt(msg)
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
| {
"repo_name": "rbalda/neural_ocr",
"path": "env/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py",
"copies": "3",
"size": "29695",
"license": "mit",
"hash": 2839919013954376700,
"line_mean": 33.9352941176,
"line_max": 101,
"alpha_frac": 0.5793904698,
"autogenerated": false,
"ratio": 4.033002852098329,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00042496184005094944,
"num_lines": 850
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from nose.tools import (assert_equal, assert_is, assert_is_not,
assert_raises)
import pandas as pd
from ggplot import *
from ggplot.utils.exceptions import GgplotError
from . import cleanup
@cleanup
def test_stat_bin():
# stat_bin needs the 'x' aesthetic to be numeric or a categorical
# and should complain if given anything else
class unknown(object):
pass
x = [unknown()] * 3
y = [1, 2, 3]
df = pd.DataFrame({'x': x, 'y': y})
gg = ggplot(aes(x='x', y='y'), df)
with assert_raises(GgplotError):
print(gg + stat_bin())
@cleanup
def test_stat_abline():
# slope and intercept function should return values
# of the same length
def fn_xy(x, y):
return [1, 2]
def fn_xy2(x, y):
return [1, 2, 3]
gg = ggplot(aes(x='wt', y='mpg'), mtcars)
# same length, no problem
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy))
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy2))
@cleanup
def test_stat_vhabline_functions():
def fn_x(x):
return 1
def fn_y(y):
return 1
def fn_xy(x, y):
return 1
gg = ggplot(aes(x='wt'), mtcars)
# needs y aesthetic
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy))
# needs y aesthetic
with assert_raises(GgplotError):
print(gg + stat_abline(intercept=fn_xy))
gg = ggplot(aes(x='wt', y='mpg'), mtcars)
# Functions with 2 args, no problem
print(gg + stat_abline(slope=fn_xy, intercept=fn_xy))
# slope function should take 2 args
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_x, intercept=fn_xy))
# intercept function should take 2 args
with assert_raises(GgplotError):
print(gg + stat_abline(slope=fn_xy, intercept=fn_y))
    # xintercept function should take only 1 arg
with assert_raises(GgplotError):
print(gg + stat_vline(xintercept=fn_xy))
    # yintercept function should take only 1 arg
with assert_raises(GgplotError):
print(gg + stat_hline(yintercept=fn_xy))
| {
"repo_name": "bitemyapp/ggplot",
"path": "ggplot/tests/test_stat_calculate_methods.py",
"copies": "12",
"size": "2240",
"license": "bsd-2-clause",
"hash": 2629700246106637300,
"line_mean": 27.7179487179,
"line_max": 69,
"alpha_frac": 0.6236607143,
"autogenerated": false,
"ratio": 3.2,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.98236607143,
"avg_score": null,
"num_lines": null
} |